"""Let's Encrypt display."""
import os
import textwrap
import dialog
import zope.interface
from letsencrypt import interfaces
WIDTH = 72
HEIGHT = 20
# Display exit codes
OK = "ok"
"""Display exit code indicating user acceptance."""
CANCEL = "cancel"
"""Display exit code for a user canceling the display."""
HELP = "help"
"""Display exit code when for when the user requests more help."""
class NcursesDisplay(object):
"""Ncurses-based display."""
zope.interface.implements(interfaces.IDisplay)
def __init__(self, width=WIDTH, height=HEIGHT):
super(NcursesDisplay, self).__init__()
self.dialog = dialog.Dialog()
self.width = width
self.height = height
def notification(self, message, height=10, pause=False):
# pylint: disable=unused-argument
"""Display a notification to the user and wait for user acceptance.
.. todo:: It probably makes sense to use one of the transient message
types for pause. It isn't straightforward how best to approach
the matter though given the context of our messages.
http://pythondialog.sourceforge.net/doc/widgets.html#displaying-transient-messages
:param str message: Message to display
:param int height: Height of the dialog box
:param bool pause: Not applicable to NcursesDisplay
"""
self.dialog.msgbox(message, height, width=self.width)
def menu(self, message, choices,
ok_label="OK", cancel_label="Cancel", help_label=""):
"""Display a menu.
:param str message: title of menu
:param choices: menu lines, len must be > 0
:type choices: list of tuples (`tag`, `item`), where tags must be unique, or
list of items (tags will be enumerated)
:param str ok_label: label of the OK button
:param str help_label: label of the help button
:returns: tuple of the form (`code`, `tag`) where
`code` - `str` display_util exit code
`tag` - `int` index corresponding to the item chosen
:rtype: tuple
"""
menu_options = {
"choices": choices,
"ok_label": ok_label,
"cancel_label": cancel_label,
"help_button": bool(help_label),
"help_label": help_label,
"width": self.width,
"height": self.height,
"menu_height": self.height - 6,
}
# Can accept either tuples or just the actual choices
if choices and isinstance(choices[0], tuple):
# pylint: disable=star-args
code, selection = self.dialog.menu(message, **menu_options)
# Return the selection index
for i, choice in enumerate(choices):
if choice[0] == selection:
return code, i
return code, -1
else:
# "choices" is not formatted the way the dialog.menu expects...
menu_options["choices"] = [
(str(i), choice) for i, choice in enumerate(choices, 1)
]
# pylint: disable=star-args
code, tag = self.dialog.menu(message, **menu_options)
if code == CANCEL:
return code, -1
return code, int(tag) - 1
def input(self, message):
"""Display an input box to the user.
:param str message: Message to display that asks for input.
:returns: tuple of the form (code, string) where
`code` - int display exit code
`string` - input entered by the user
"""
sections = message.split("\n")
# each section takes at least one line, plus extras if it's longer than self.width
wordlines = [1 + (len(section)/self.width) for section in sections]
height = 6 + sum(wordlines) + len(sections)
return self.dialog.inputbox(message, width=self.width, height=height)
def yesno(self, message, yes_label="Yes", no_label="No"):
"""Display a Yes/No dialog box.
Yes and No labels must begin with different letters.
:param str message: message to display to user
:param str yes_label: label on the "yes" button
:param str no_label: label on the "no" button
:returns: if yes_label was selected
:rtype: bool
"""
return self.dialog.DIALOG_OK == self.dialog.yesno(
message, self.height, self.width,
yes_label=yes_label, no_label=no_label)
def checklist(self, message, tags, default_status=True):
"""Displays a checklist.
:param message: Message to display before choices
:param list tags: where each is of type :class:`str` len(tags) > 0
:param bool default_status: If True, items are in a selected state by
default.
:returns: tuple of the form (code, list_tags) where
`code` - int display exit code
`list_tags` - list of str tags selected by the user
"""
choices = [(tag, "", default_status) for tag in tags]
return self.dialog.checklist(
message, width=self.width, height=self.height, choices=choices)
class FileDisplay(object):
"""File-based display."""
zope.interface.implements(interfaces.IDisplay)
def __init__(self, outfile):
super(FileDisplay, self).__init__()
self.outfile = outfile
def notification(self, message, height=10, pause=True):
# pylint: disable=unused-argument
"""Displays a notification and waits for user acceptance.
:param str message: Message to display
:param int height: No effect for FileDisplay
:param bool pause: Whether or not the program should pause for the
user's confirmation
"""
side_frame = "-" * 79
message = self._wrap_lines(message)
self.outfile.write(
"{line}{frame}{line}{msg}{line}{frame}{line}".format(
line=os.linesep, frame=side_frame, msg=message))
if pause:
raw_input("Press Enter to Continue")
def menu(self, message, choices,
ok_label="", cancel_label="", help_label=""):
# pylint: disable=unused-argument
"""Display a menu.
.. todo:: This doesn't enable the help label/button (I wasn't sold on
any interface I came up with for this). It would be a nice feature.
:param str message: title of menu
:param choices: Menu lines, len must be > 0
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
:returns: tuple of the form (`code`, `tag`) where
`code` - str display exit code
`tag` - int index corresponding to the item chosen
:rtype: tuple
"""
self._print_menu(message, choices)
code, selection = self._get_valid_int_ans(len(choices))
return code, selection - 1
def input(self, message):
# pylint: disable=no-self-use
"""Accept input from the user.
:param str message: message to display to the user
:returns: tuple of (`code`, `input`) where
`code` - str display exit code
`input` - str of the user's input
:rtype: tuple
"""
ans = raw_input(
textwrap.fill("%s (Enter 'c' to cancel): " % message, 80))
if ans == "c" or ans == "C":
return CANCEL, "-1"
else:
return OK, ans
def yesno(self, message, yes_label="Yes", no_label="No"):
"""Query the user with a yes/no question.
Yes and No labels must begin with different letters, and must contain at
least one letter each.
:param str message: question for the user
:param str yes_label: Label of the "Yes" parameter
:param str no_label: Label of the "No" parameter
:returns: True for "Yes", False for "No"
:rtype: bool
"""
side_frame = ("-" * 79) + os.linesep
message = self._wrap_lines(message)
self.outfile.write("{0}{frame}{msg}{0}{frame}".format(
os.linesep, frame=side_frame, msg=message))
while True:
ans = raw_input("{yes}/{no}: ".format(
yes=_parens_around_char(yes_label),
no=_parens_around_char(no_label)))
# Couldn't get pylint indentation right with elif
# elif doesn't matter in this situation
if (ans.startswith(yes_label[0].lower()) or
ans.startswith(yes_label[0].upper())):
return True
if (ans.startswith(no_label[0].lower()) or
ans.startswith(no_label[0].upper())):
return False
def checklist(self, message, tags, default_status=True):
# pylint: disable=unused-argument
"""Display a checklist.
:param str message: Message to display to user
:param list tags: `str` tags to select, len(tags) > 0
:param bool default_status: Not used for FileDisplay
:returns: tuple of (`code`, `tags`) where
`code` - str display exit code
`tags` - list of selected tags
:rtype: tuple
"""
while True:
self._print_menu(message, tags)
code, ans = self.input("Select the appropriate numbers separated "
"by commas and/or spaces")
if code == OK:
indices = separate_list_input(ans)
selected_tags = self._scrub_checklist_input(indices, tags)
if selected_tags:
return code, selected_tags
else:
self.outfile.write(
"** Error - Invalid selection **%s" % os.linesep)
else:
return code, []
def _scrub_checklist_input(self, indices, tags):
# pylint: disable=no-self-use
"""Validate input and transform indices to appropriate tags.
:param list indices: input
:param list tags: Original tags of the checklist
:returns: valid tags the user selected
:rtype: :class:`list` of :class:`str`
"""
# They should all be of type int
try:
indices = [int(index) for index in indices]
except ValueError:
return []
# Remove duplicates
indices = list(set(indices))
# Check all input is within range
for index in indices:
if index < 1 or index > len(tags):
return []
# Transform indices to appropriate tags
return [tags[index - 1] for index in indices]
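# Worked example: with tags=["apache", "nginx", "standalone"], the input
# indices=["1", "3"] are converted to [1, 3] and mapped back to
# ["apache", "standalone"]; any out-of-range or non-numeric entry
# (e.g. "0", "4", "x") invalidates the whole selection and returns [],
# which makes checklist() re-prompt the user.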
def _print_menu(self, message, choices):
"""Print a menu on the screen.
:param str message: title of menu
:param choices: Menu lines
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
"""
# Can take either tuples or single items in choices list
if choices and isinstance(choices[0], tuple):
choices = ["%s - %s" % (c[0], c[1]) for c in choices]
# Write out the message to the user
self.outfile.write(
"{new}{msg}{new}".format(new=os.linesep, msg=message))
side_frame = ("-" * 79) + os.linesep
self.outfile.write(side_frame)
# Write out the menu choices
for i, desc in enumerate(choices, 1):
self.outfile.write(
textwrap.fill("{num}: {desc}".format(num=i, desc=desc), 80))
# Keep this outside of the textwrap
self.outfile.write(os.linesep)
self.outfile.write(side_frame)
def _wrap_lines(self, msg): # pylint: disable=no-self-use
"""Format lines nicely to 80 chars.
:param str msg: Original message
:returns: Formatted message respecting newlines in message
:rtype: str
"""
lines = msg.splitlines()
fixed_l = []
for line in lines:
fixed_l.append(textwrap.fill(line, 80))
return os.linesep.join(fixed_l)
def _get_valid_int_ans(self, max_):
"""Get a numerical selection.
:param int max_: The maximum entry (len of choices), must be positive
:returns: tuple of the form (`code`, `selection`) where
`code` - str display exit code ('ok' or 'cancel')
`selection` - int user's selection
:rtype: tuple
"""
selection = -1
if max_ > 1:
input_msg = ("Select the appropriate number "
"[1-{max_}] then [enter] (press 'c' to "
"cancel): ".format(max_=max_))
else:
input_msg = ("Press 1 [enter] to confirm the selection "
"(press 'c' to cancel): ")
while selection < 1:
ans = raw_input(input_msg)
if ans.startswith("c") or ans.startswith("C"):
return CANCEL, -1
try:
selection = int(ans)
if selection < 1 or selection > max_:
selection = -1
raise ValueError
except ValueError:
self.outfile.write(
"{0}** Invalid input **{0}".format(os.linesep))
return OK, selection
def separate_list_input(input_):
"""Separate a comma or space separated list.
:param str input_: input from the user
:returns: strings
:rtype: list
"""
no_commas = input_.replace(",", " ")
# Each string is naturally unicode, this causes problems with M2Crypto SANs
# TODO: check if above is still true when M2Crypto is gone ^
return [str(string) for string in no_commas.split()]
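# Example: separate_list_input("1,2, 3  4") returns ["1", "2", "3", "4"];
# commas are normalized to spaces before splitting, and every token is
# coerced to a byte string.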
def _parens_around_char(label):
"""Place parens around first character of label.
:param str label: Must contain at least one character
"""
return "({first}){rest}".format(first=label[0], rest=label[1:])
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def _extract_concise_traceback():
tb = traceback.extract_stack()
if len(tb) == 0:
return "I'm lost!"
# HACK: This function is in a file called 'rdd.py' in the top level of
# everything PySpark. Just trim off the directory name and assume
# everything in that tree is PySpark guts.
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return "%s at %s:%d" % (fun, file, line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame-1]
return "%s at %s:%d" % (sfun, ufile, uline)
_spark_stack_depth = 0
class _JavaStackTrace(object):
def __init__(self, sc):
self._traceback = _extract_concise_traceback()
self._context = sc
def __enter__(self):
global _spark_stack_depth
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(self._traceback)
_spark_stack_depth += 1
def __exit__(self, type, value, tb):
global _spark_stack_depth
_spark_stack_depth -= 1
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(None)
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
def __repr__(self):
return self._jrdd.toString()
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self._jrdd.cache()
return self
def persist(self, storageLevel):
"""
Set this RDD's storage level to persist its values across operations after the first time
it is computed. This can only be used to assign a new storage level if the RDD does not
have a storage level set yet.
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
else:
return None
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
"""
def func(split, iterator): return imap(f, iterator)
return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator): return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator): return f(iterator)
return self.mapPartitionsWithIndex(func)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator): return ifilter(f, iterator)
return self.mapPartitions(func)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed):
"""
Return a sampled subset of this RDD (relies on numpy and falls back
on default random generator if numpy is unavailable).
>>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
[2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
"""
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed):
"""
Return a fixed-size sampled subset of this RDD (currently requires numpy).
>>> sc.parallelize(range(0, 10)).takeSample(True, 10, 1) #doctest: +SKIP
[4, 2, 1, 8, 2, 7, 0, 4, 1, 4]
"""
fraction = 0.0
total = 0
multiplier = 3.0
initialCount = self.count()
maxSelected = 0
if (num < 0):
raise ValueError
if initialCount > sys.maxint - 1:
maxSelected = sys.maxint - 1
else:
maxSelected = initialCount
if num > initialCount and not withReplacement:
total = maxSelected
fraction = multiplier * (maxSelected + 1) / initialCount
else:
fraction = multiplier * (num + 1) / initialCount
total = num
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < total:
if seed > sys.maxint - 2:
seed = -1
seed += 1
samples = self.sample(withReplacement, fraction, seed).collect()
sampler = RDDSampler(withReplacement, fraction, seed+1)
sampler.shuffle(samples)
return samples[0:total]
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def _reserialize(self):
if self._jrdd_deserializer == self.ctx.serializer:
return self
else:
return self.map(lambda x: x, preservesPartitioning=True)
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
bounds = list()
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
if numPartitions > 1:
rddSize = self.count()
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
for i in range(0, numPartitions - 1):
index = (len(samples) - 1) * (i + 1) / numPartitions
bounds.append(samples[index])
def rangePartitionFunc(k):
p = 0
while p < len(bounds) and keyfunc(k) > bounds[p]:
p += 1
if ascending:
return p
else:
return numPartitions-1-p
def mapFunc(iterator):
yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
.mapPartitions(mapFunc, preservesPartitioning=True)
.flatMap(lambda x: x, preservesPartitioning=True))
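# Worked example: with numPartitions=3, ascending=True and sampled bounds
# ["f", "m"], rangePartitionFunc("c") == 0, rangePartitionFunc("h") == 1 and
# rangePartitionFunc("z") == 2, so keys land in partitions in key order and
# mapFunc then sorts each partition locally.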
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator): yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
['1', '2', '3']
"""
def func(iterator):
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in pipe.stdout)
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
yield None
self.mapPartitions(processPartition).collect() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
... yield None
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
self.mapPartitions(f).collect() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with _JavaStackTrace(self.context) as st:
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
"""
def func(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = f(obj, acc)
if acc is not None:
yield acc
vals = self.mapPartitions(func).collect()
return reduce(f, vals)
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
# TODO: aggregate
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which corrects for bias in
estimating the standard deviation by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects for bias in
estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def take(self, num):
"""
Take the first num elements of the RDD.
This currently scans the partitions *one by one*, so it will be slow if
a lot of partitions are required. In that case, use L{collect} to get
the whole RDD instead.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
"""
def takeUpToNum(iterator):
taken = 0
while taken < num:
yield next(iterator)
taken += 1
# Take only up to num elements from each partition we try
mapped = self.mapPartitions(takeUpToNum)
items = []
# TODO(shivaram): Similar to the scala implementation, update the take
# method to scan multiple splits based on an estimate of how many elements
# we have per-split.
with _JavaStackTrace(self.context) as st:
for partition in range(mapped._jrdd.splits().size()):
partitionsToTake = self.ctx._gateway.new_array(self.ctx._jvm.int, 1)
partitionsToTake[0] = partition
iterator = mapped._jrdd.collectPartitions(partitionsToTake)[0].iterator()
items.extend(mapped._collect_iterator_through_file(iterator))
if len(items) >= num:
break
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
"""
return self.take(1)[0]
def saveAsTextFile(self, path):
"""
Save this RDD as a text file, using string representations of elements.
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
yield x.encode("utf-8")
keyed = PipelinedRDD(self, func)
keyed._bypass_serializer = True
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for (k, v) in iterator:
m[k] = v if k not in m else func(m[k], v)
yield m
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] = v if k not in m1 else func(m1[k], v)
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in other have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
def partitionBy(self, numPartitions, partitionFunc=hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
# Transferring O(n) objects to Java is too expensive. Instead, we'll
# form the hash buckets in Python, transferring O(numPartitions) objects
# to Java. Each object is a (splitNumber, [objects]) pair.
outputSerializer = self.ctx._unbatched_serializer
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
for (k, v) in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
for (split, items) in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = PipelinedRDD(self, add_shuffle_key)
keyed._bypass_serializer = True
with _JavaStackTrace(self.context) as st:
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = pairRDD.partitionBy(partitioner).values()
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
# This is required so that id(partitionFunc) remains unique, even if
# partitionFunc is a lambda:
rdd._partitionFunc = partitionFunc
return rdd
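# Illustrative example: add_shuffle_key emits an alternating stream of
# (packed split number, pickled bucket) records.  With numPartitions=2 and
# the default hash partitioner, pairs [(1, 1), (2, 2), (3, 3)] form
# bucket 1 -> [(1, 1), (3, 3)] and bucket 0 -> [(2, 2)] (hash(n) == n for
# small CPython 2 ints), so only two bucket objects cross the Py4J boundary.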
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
def combineLocally(iterator):
combiners = {}
for x in iterator:
(k, v) = x
if k not in combiners:
combiners[k] = createCombiner(v)
else:
combiners[k] = mergeValue(combiners[k], v)
return combiners.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
combiners = {}
for (k, v) in iterator:
if k not in combiners:
combiners[k] = v
else:
combiners[k] = mergeCombiners(combiners[k], v)
return combiners.iteritems()
return shuffled.mapPartitions(_mergeCombiners)
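# Walking the doctest above for key 'a': createCombiner turns the first
# value into str(1) == '1', mergeValue then gives add('1', 1) == '11', and
# mergeCombiners is only applied when partial combiners for the same key
# arrive from different partitions after the shuffle.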
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD into numPartitions partitions.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(x.groupByKey().collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
return a + b
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions)
# TODO: add tests
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
def groupWith(self, other):
"""
Alias for cogroup.
"""
return self.cogroup(other)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as well
as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.cogroup(y).collect())
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup(self, other, numPartitions)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching key
in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
map_func = lambda (key, vals): [(key, val) for val in vals[0]]
return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
rdd = other.map(lambda x: (x, True)) # note: here 'True' is just a placeholder
return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0]) # note: here 'True' is just a placeholder
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> sorted(x.cogroup(y).collect())
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD. Internally, this uses
a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
serializer = NoOpSerializer()
else:
serializer = self.ctx.serializer
command = (self.func, self._prev_jrdd_deserializer, serializer)
pickled_command = CloudPickleSerializer().dumps(command)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
self.ctx._gateway._gateway_client)
self.ctx._pickled_broadcast_vars.clear()
class_tag = self._prev_jrdd.classTag()
env = MapConverter().convert(self.ctx.environment,
self.ctx._gateway._gateway_client)
includes = ListConverter().convert(self.ctx._python_includes,
self.ctx._gateway._gateway_client)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_command), env, includes, self.preservesPartitioning,
self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
class_tag)
self._jrdd_val = python_rdd.asJavaRDD()
return self._jrdd_val
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
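# A cached or checkpointed RDD has to be materialized as its own JavaRDD, so
# _is_pipelinable() returning False makes the next transformation start a
# fresh pipeline (see __init__ above) instead of composing its func with
# prev.func.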
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
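# Running this file directly (python rdd.py) executes the doctests via
# _test(), which starts a local[4] SparkContext with batchSize=2 so the
# examples exercise multiple serialized batches.
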
from __future__ import absolute_import
from . import app
from alta.bims import Bims
from ..utils import get_conf, runJob
from celery import chain
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@app.task(name='presta.app.lims.sync_samples')
def sync_samples(samples, **kwargs):
bika_conf = kwargs.get('conf')
result = kwargs.get('result', '1')
sync_all_analyses = kwargs.get('sync_all_analyses', False)
if samples and len(samples) > 0:
pipeline = chain(
submit_analyses.si(samples, bika_conf, sync_all_analyses, result),
verify_analyses.si(samples, bika_conf, sync_all_analyses),
publish_analyses.si(samples, bika_conf, sync_all_analyses),
publish_analysis_requests.si(samples, bika_conf),
)
pipeline.delay()
else:
logger.info('No samples to sync')
return True
@app.task(name='presta.app.lims.sync_batches')
def sync_batches(batches, **kwargs):
bika_conf = kwargs.get('conf')
if batches and len(batches) > 0:
pipeline = chain(
close_batches.si(batches, bika_conf)
)
pipeline.delay()
else:
logger.info('No batches to sync')
return True
@app.task(name='presta.app.lims.sync_worksheets')
def sync_worksheets(worksheets, **kwargs):
bika_conf = kwargs.get('conf')
if worksheets and len(worksheets) > 0:
pipeline = chain(
close_worksheets.si(worksheets, bika_conf)
)
pipeline.delay()
else:
logger.info('No worksheets to sync')
return True
@app.task(name='presta.app.lims.process_deliveries')
def process_deliveries(deliveries):
if isinstance(deliveries, list) and len(deliveries) > 0:
logger.info('{} deliveries ready to process'.format(len(deliveries)))
for delivery in deliveries:
pipeline = chain(
run_presta_delivery.si(delivery_id=delivery.get('id'))
)
pipeline.delay()
else:
logger.info('No deliveries to sync')
return True
@app.task(name='presta.app.lims.set_delivery_started')
def set_delivery_started(**kwargs):
delivery_id = kwargs.get('delivery_id')
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
if delivery_id:
logger.info('Set delivery {} as started'.format(delivery_id))
res = bika.set_delivery_started(delivery_id)
logger.info('Result {}'.format(res))
return res.get('success')
return True
@app.task(name='presta.app.lims.set_delivery_completed')
def set_delivery_completed(**kwargs):
delivery_id = kwargs.get('delivery_id')
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
if delivery_id:
logger.info('Set delivery {} as completed'.format(delivery_id))
res = bika.set_delivery_completed(delivery_id)
logger.info('Result {}'.format(res))
return res.get('success')
return True
@app.task(name='presta.app.lims.update_delivery_details')
def update_delivery_details(**kwargs):
delivery_id = kwargs.get('delivery_id')
user = kwargs.get('user')
password = kwargs.get('password')
path = kwargs.get('path')
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
if delivery_id:
logger.info('Update details of delivery {}'.format(delivery_id))
res = bika.update_delivery_details(delivery_id, user=user,
password=password, path=path)
logger.info('Result {}'.format(res))
return res.get('success')
return True
@app.task(name='presta.app.lims.sync_analysis_requests')
def sync_analysis_requests(samples, bika_conf):
if samples and len(samples) > 0:
pass
return True
@app.task(name='presta.app.lims.submit_analyses')
def submit_analyses(samples, bika_conf, sync_all_analyses=False, result='1'):
if samples and len(samples) > 0:
bika = __init_bika(bika_conf)
analyses = bika.get_analyses_ready_to_be_synchronized(samples=samples, action='submit',
sync_all_analyses=sync_all_analyses)
if isinstance(analyses, list) and len(analyses) > 0:
logger.info('Submit {} analyses'.format(len(analyses)))
res = bika.submit_analyses(analyses, result)
logger.info('Submit Result {}'.format(res))
return res.get('success')
logger.info('Nothing to submit')
return True
@app.task(name='presta.app.lims.verify_analyses')
def verify_analyses(samples, bika_conf, sync_all_analyses=False):
if samples and len(samples) > 0:
bika = __init_bika(bika_conf)
analyses = bika.get_analyses_ready_to_be_synchronized(samples=samples, action='verify',
sync_all_analyses=sync_all_analyses)
if isinstance(analyses, list) and len(analyses) > 0:
logger.info('Verify {} analyses'.format(len(analyses)))
res = bika.verify_analyses(analyses)
logger.info('Verify Result: {}'.format(res))
return res.get('success')
logger.info('Nothing to verify')
return True
@app.task(name='presta.app.lims.publish_analyses')
def publish_analyses(samples, bika_conf, sync_all_analyses=False):
if samples and len(samples) > 0:
bika = __init_bika(bika_conf)
analyses = bika.get_analyses_ready_to_be_synchronized(samples=samples, action='publish',
sync_all_analyses=sync_all_analyses)
if isinstance(analyses, list) and len(analyses) > 0:
logger.info('Publish {} analyses'.format(len(analyses)))
res = bika.publish_analyses(analyses)
logger.info('Publish Result: {}'.format(res))
return res.get('success')
logger.info('Nothing to publish')
return True
@app.task(name='presta.app.lims.publish_analysis_requests')
def publish_analysis_requests(samples, bika_conf):
if samples and len(samples) > 0:
bika = __init_bika(bika_conf)
analysis_requests = bika.get_analysis_requests_ready_to_be_published(samples=samples)
if isinstance(analysis_requests, list) and len(analysis_requests) > 0:
logger.info('Publish {} analysis requests'.format(len(analysis_requests)))
res = bika.publish_analysis_requests(analysis_requests)
logger.info('Publish Result: {}'.format(res))
return res.get('success')
logger.info('Nothing to publish')
return True
@app.task(name='presta.app.lims.close_batches')
def close_batches(batches, bika_conf):
bika = __init_bika(bika_conf)
batches = bika.get_batches_ready_to_be_closed(batches=batches)
if isinstance(batches, list) and len(batches) > 0:
logger.info('Close {} batches'.format(len(batches)))
res = bika.close_batches(batches)
logger.info('Close Result: {}'.format(res))
return res.get('success')
logger.info('Nothing to close')
return True
@app.task(name='presta.app.lims.close_worksheets')
def close_worksheets(worksheets, bika_conf):
bika = __init_bika(bika_conf)
worksheets = bika.get_worksheets_ready_to_be_closed(worksheets=worksheets)
if isinstance(worksheets, list) and len(worksheets) > 0:
logger.info('Close {} worksheets'.format(len(worksheets)))
res = bika.close_worksheets(worksheets)
logger.info('Close Result: {}'.format(res))
return res.get('success')
logger.info('Nothing to close')
return True
@app.task(name='presta.app.lims.search_batches_to_sync')
def search_batches_to_sync(**kwargs):
emit_events = kwargs.get('emit_events', False)
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
batches, samples = bika.get_batches_ready_to_be_closed(also_samples=True)
if emit_events:
pipeline = chain(
sync_samples.si(samples, conf=bika_conf),
sync_batches.si(batches, conf=bika_conf),
)
pipeline.delay()
return True
@app.task(name='presta.app.lims.search_worksheets_to_sync')
def search_worksheets_to_sync(**kwargs):
emit_events = kwargs.get('emit_events', False)
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
worksheets, samples = bika.get_worksheets_ready_to_be_closed(also_samples=True)
if emit_events:
pipeline = chain(
sync_samples.si(samples, conf=bika_conf),
sync_worksheets.si(worksheets, conf=bika_conf),
)
pipeline.delay()
return True
@app.task(name='presta.app.lims.search_deliveries_to_sync')
def search_deliveries_to_sync(**kwargs):
emit_events = kwargs.get('emit_events', False)
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
deliveries = bika.get_deliveries_ready_to_process()
if emit_events:
pipeline = chain(
process_deliveries.si(deliveries),
)
pipeline.delay()
return True
@app.task(name='presta.app.lims.search_samples_to_sync')
def search_samples_to_sync(**kwargs):
emit_events = kwargs.get('emit_events', False)
conf = get_conf(logger, None)
bika_conf = conf.get_section('bika')
bika = __init_bika(bika_conf)
samples = bika.get_analysis_requests_ready_to_be_published()
if emit_events:
pipeline = chain(
sync_samples.si(samples, conf=bika_conf),
)
pipeline.delay()
return True
@app.task(name='presta.app.lims.run_presta_delivery')
def run_presta_delivery(**kwargs):
emit_events = kwargs.get('emit_events', False)
delivery_id = kwargs.get('delivery_id')
cmd_line = ['presta', 'delivery']
if delivery_id:
cmd_line.extend(['--delivery_id', delivery_id])
if emit_events:
cmd_line.append('--emit_events')
logger.info('Running {}'.format(cmd_line))
result = runJob(cmd_line, logger)
return True if result else False
return False
def __init_bika(bika_conf, role='admin'):
bika_roles = bika_conf.get('roles')
if bika_conf and bika_roles and role in bika_roles:
bika_role = bika_roles.get(role)
url = bika_conf.get('url')
user = bika_role.get('user')
password = bika_role.get('password')
bika = Bims(url, user, password, 'bikalims').bims
return bika
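# Hypothetical usage sketch (illustrative only; `samples` and `bika_conf`
# stand for values produced elsewhere): these Celery tasks are normally
# enqueued by a scheduler or another task, for example:
#
#     search_samples_to_sync.delay(emit_events=True)
#     sync_samples.delay(samples, conf=bika_conf, sync_all_analyses=False)
#
# Each search_* task reads the 'bika' configuration section, builds a LIMS
# client through __init_bika(), and chains the relevant sync tasks.
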
import pytest
from urlparse import urlparse
from rest_framework import exceptions
from api.base.settings.defaults import API_BASE
from osf.utils import permissions
from osf.models import Registration, NodeLog
from framework.auth import Auth
from api.registrations.serializers import RegistrationSerializer, RegistrationDetailSerializer
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
RegistrationApprovalFactory,
AuthUserFactory,
WithdrawnRegistrationFactory,
)
from tests.utils import assert_latest_log
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestRegistrationDetail:
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(
title='Public Project',
is_public=True,
creator=user)
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(title='Private Project', creator=user)
@pytest.fixture()
def public_registration(self, user, public_project):
return RegistrationFactory(
project=public_project,
creator=user,
is_public=True)
@pytest.fixture()
def private_registration(self, user, private_project):
return RegistrationFactory(project=private_project, creator=user)
@pytest.fixture()
def public_url(self, public_registration):
return '/{}registrations/{}/'.format(API_BASE, public_registration._id)
@pytest.fixture()
def private_url(self, private_registration):
return '/{}registrations/{}/'.format(
API_BASE, private_registration._id)
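# The fixtures above compose user -> project -> registration -> URL, so each
# test below only requests the pieces it needs; with the default
# function-scoped fixtures, pytest rebuilds the whole chain for every test.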
def test_registration_detail(
self, app, user, public_project, private_project,
public_registration, private_registration,
public_url, private_url):
non_contributor = AuthUserFactory()
# test_return_public_registration_details_logged_out
res = app.get(public_url)
assert res.status_code == 200
data = res.json['data']
registered_from = urlparse(
data['relationships']['registered_from']['links']['related']['href']
).path
assert data['attributes']['registration'] is True
assert data['attributes']['current_user_is_contributor'] is False
assert registered_from == '/{}nodes/{}/'.format(
API_BASE, public_project._id)
# test_return_public_registration_details_logged_in
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
data = res.json['data']
registered_from = urlparse(
data['relationships']['registered_from']['links']['related']['href']).path
assert data['attributes']['registration'] is True
assert data['attributes']['current_user_is_contributor'] is True
assert registered_from == '/{}nodes/{}/'.format(
API_BASE, public_project._id)
# test_return_private_registration_details_logged_out
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_project_registrations_logged_in_contributor
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
data = res.json['data']
registered_from = urlparse(
data['relationships']['registered_from']['links']['related']['href']).path
assert data['attributes']['registration'] is True
assert data['attributes']['current_user_is_contributor'] is True
assert registered_from == '/{}nodes/{}/'.format(
API_BASE, private_project._id)
# test_return_private_registration_details_logged_in_non_contributor
res = app.get(
private_url,
auth=non_contributor.auth,
expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_do_not_return_node_detail
url = '/{}registrations/{}/'.format(API_BASE, public_project._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == exceptions.NotFound.default_detail
# test_do_not_return_node_detail_in_sub_view
url = '/{}registrations/{}/contributors/'.format(
API_BASE, public_project._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == exceptions.NotFound.default_detail
# test_do_not_return_registration_in_node_detail
url = '/{}nodes/{}/'.format(API_BASE, public_registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == exceptions.NotFound.default_detail
# test_registration_shows_related_counts
url = '/{}registrations/{}/?related_counts=True'.format(
API_BASE, private_registration._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
assert res.json['data']['relationships']['contributors']['links']['related']['meta']['count'] == 1
# test_registration_shows_specific_related_counts
url = '/{}registrations/{}/?related_counts=children'.format(
API_BASE, private_registration._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
assert res.json['data']['relationships']['contributors']['links']['related']['meta'] == {}
# test_hide_if_registration
# Registrations are a HideIfRegistration field
node_url = '/{}nodes/{}/'.format(API_BASE, private_project._id)
res = app.get(node_url, auth=user.auth)
assert res.status_code == 200
assert 'registrations' in res.json['data']['relationships']
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
assert 'registrations' not in res.json['data']['relationships']
@pytest.mark.django_db
class TestRegistrationUpdate:
@pytest.fixture()
def read_only_contributor(self):
return AuthUserFactory()
@pytest.fixture()
def read_write_contributor(self):
return AuthUserFactory()
@pytest.fixture()
def registration_approval(self, user):
return RegistrationApprovalFactory(
state='unapproved', approve=False, user=user)
@pytest.fixture()
def unapproved_registration(self, registration_approval):
return Registration.objects.get(
registration_approval=registration_approval)
@pytest.fixture()
def unapproved_url(self, unapproved_registration):
return '/{}registrations/{}/'.format(
API_BASE, unapproved_registration._id)
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(
title='Public Project',
is_public=True,
creator=user)
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(title='Private Project', creator=user)
@pytest.fixture()
def public_registration(self, user, public_project):
return RegistrationFactory(
project=public_project,
creator=user,
is_public=True)
@pytest.fixture()
def private_registration(
self, user, private_project, read_only_contributor,
read_write_contributor):
private_registration = RegistrationFactory(
project=private_project, creator=user)
private_registration.add_contributor(
read_only_contributor, permissions=[
permissions.READ])
private_registration.add_contributor(
read_write_contributor, permissions=[
permissions.WRITE])
private_registration.save()
return private_registration
@pytest.fixture()
def public_url(self, public_registration):
return '/{}registrations/{}/'.format(API_BASE, public_registration._id)
@pytest.fixture()
def private_url(self, private_registration):
return '/{}registrations/{}/'.format(
API_BASE, private_registration._id)
@pytest.fixture()
def attributes(self):
return {'public': True}
@pytest.fixture()
def make_payload(self, private_registration, attributes):
def payload(
id=private_registration._id,
type='registrations',
attributes=attributes
):
return {
'data': {
'id': id,
'type': type,
'attributes': attributes
}
}
return payload
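# For reference (a sketch of the structure produced above, not an exhaustive
# example): calling make_payload() with the defaults yields a JSON-API document
# shaped like
#   {'data': {'id': '<private registration id>',
#             'type': 'registrations',
#             'attributes': {'public': True}}}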
def test_update_registration(
self, app, user, read_only_contributor,
read_write_contributor, public_registration,
public_url, private_url, make_payload):
private_registration_payload = make_payload()
non_contributor = AuthUserFactory()
# test_update_private_registration_logged_out
res = app.put_json_api(
private_url,
private_registration_payload,
expect_errors=True)
assert res.status_code == 401
# test_update_private_registration_logged_in_admin
res = app.put_json_api(
private_url,
private_registration_payload,
auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['public'] is True
# test_update_private_registration_logged_in_read_only_contributor
res = app.put_json_api(
private_url,
private_registration_payload,
auth=read_only_contributor.auth,
expect_errors=True)
assert res.status_code == 403
# test_update_private_registration_logged_in_read_write_contributor
res = app.put_json_api(
private_url,
private_registration_payload,
auth=read_write_contributor.auth,
expect_errors=True)
assert res.status_code == 403
# test_update_public_registration_to_private
public_to_private_payload = make_payload(
id=public_registration._id, attributes={'public': False})
res = app.put_json_api(
public_url,
public_to_private_payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registrations can only be turned from private to public.'
res = app.put_json_api(
public_url,
public_to_private_payload,
auth=non_contributor.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
def test_fields(
self, app, user, public_registration,
private_registration, public_url,
private_url, make_payload):
# test_public_field_has_invalid_value
invalid_public_payload = make_payload(
id=public_registration._id,
attributes={'public': 'Dr.Strange'})
res = app.put_json_api(
public_url,
invalid_public_payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == '"Dr.Strange" is not a valid boolean.'
# test_fields_other_than_public_are_ignored
attribute_list = {
'public': True,
'category': 'instrumentation',
'title': 'New title',
'description': 'New description'
}
verbose_private_payload = make_payload(attributes=attribute_list)
res = app.put_json_api(
private_url,
verbose_private_payload,
auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['public'] is True
assert res.json['data']['attributes']['category'] == 'project'
assert res.json['data']['attributes']['description'] == private_registration.description
assert res.json['data']['attributes']['title'] == private_registration.title
# test_type_field_must_match
node_type_payload = make_payload(type='node')
res = app.put_json_api(
private_url,
node_type_payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_id_field_must_match
mismatch_id_payload = make_payload(id='12345')
res = app.put_json_api(
private_url,
mismatch_id_payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
def test_turning_private_registrations_public(
self, app, user, make_payload):
private_project = ProjectFactory(creator=user, is_public=False)
private_registration = RegistrationFactory(
project=private_project, creator=user, is_public=False)
private_to_public_payload = make_payload(id=private_registration._id)
url = '/{}registrations/{}/'.format(API_BASE, private_registration._id)
res = app.put_json_api(url, private_to_public_payload, auth=user.auth)
assert res.json['data']['attributes']['public'] is True
private_registration.reload()
assert private_registration.is_public
def test_registration_fields_are_read_only(self):
writeable_fields = [
'type',
'public',
'draft_registration',
'registration_choice',
'lift_embargo',
'tags',
'custom_citation']
for field in RegistrationSerializer._declared_fields:
reg_field = RegistrationSerializer._declared_fields[field]
if field not in writeable_fields:
assert getattr(reg_field, 'read_only', False) is True
def test_registration_detail_fields_are_read_only(self):
writeable_fields = [
'type',
'public',
'draft_registration',
'registration_choice',
'lift_embargo',
'tags',
'custom_citation']
for field in RegistrationDetailSerializer._declared_fields:
reg_field = RegistrationDetailSerializer._declared_fields[field]
if field not in writeable_fields:
assert getattr(reg_field, 'read_only', False) is True
def test_user_cannot_delete_registration(self, app, user, private_url):
res = app.delete_json_api(
private_url,
expect_errors=True,
auth=user.auth)
assert res.status_code == 405
def test_make_public_unapproved_registration_raises_error(
self, app, user, unapproved_registration, unapproved_url, make_payload):
attribute_list = {
'public': True,
'withdrawn': True
}
unapproved_registration_payload = make_payload(
id=unapproved_registration._id, attributes=attribute_list)
res = app.put_json_api(
unapproved_url,
unapproved_registration_payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'An unapproved registration cannot be made public.'
@pytest.mark.django_db
class TestRegistrationTags:
@pytest.fixture()
def user_admin(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user_admin, user_read_contrib):
project_public = ProjectFactory(
title='Project One',
is_public=True,
creator=user_admin)
project_public.add_contributor(
user_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True)
project_public.add_contributor(
user_read_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True)
return project_public
@pytest.fixture()
def registration_public(self, project_public, user_admin):
return RegistrationFactory(
project=project_public,
creator=user_admin,
is_public=True)
@pytest.fixture()
def registration_private(self, project_public, user_admin):
return RegistrationFactory(
project=project_public,
creator=user_admin,
is_public=False)
@pytest.fixture()
def registration_withdrawn(self, project_public, user_admin):
return RegistrationFactory(
project=project_public,
creator=user_admin,
is_public=True)
@pytest.fixture()
def withdrawn_registration(self, registration_withdrawn, user_admin):
registration_withdrawn.add_tag(
'existing-tag', auth=Auth(user=user_admin))
registration_withdrawn.save()
withdrawn_registration = WithdrawnRegistrationFactory(
registration=registration_withdrawn, user=user_admin)
withdrawn_registration.justification = 'We made a major error.'
withdrawn_registration.save()
return withdrawn_registration
@pytest.fixture()
def url_registration_public(self, registration_public):
return '/{}registrations/{}/'.format(
API_BASE, registration_public._id)
@pytest.fixture()
def url_registration_private(self, registration_private):
return '/{}registrations/{}/'.format(
API_BASE, registration_private._id)
@pytest.fixture()
def url_registration_withdrawn(
self, registration_withdrawn, withdrawn_registration):
return '/{}registrations/{}/'.format(
API_BASE, registration_withdrawn._id)
@pytest.fixture()
def new_tag_payload_public(self, registration_public):
return {
'data': {
'id': registration_public._id,
'type': 'registrations',
'attributes': {
'tags': ['new-tag'],
}
}
}
@pytest.fixture()
def new_tag_payload_private(self, registration_private):
return {
'data': {
'id': registration_private._id,
'type': 'registrations',
'attributes': {
'tags': ['new-tag'],
}
}
}
@pytest.fixture()
def new_tag_payload_withdrawn(self, registration_withdrawn):
return {
'data': {
'id': registration_withdrawn._id,
'type': 'registrations',
'attributes': {
'tags': ['new-tag', 'existing-tag'],
}
}
}
def test_registration_tags(
self, app, registration_public, registration_private,
url_registration_public, url_registration_private,
new_tag_payload_public, new_tag_payload_private,
user_admin, user_non_contrib):
# test_registration_starts_with_no_tags
res = app.get(url_registration_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
# test_registration_does_not_expose_system_tags
registration_public.add_system_tag('systag', save=True)
res = app.get(url_registration_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
# test_contributor_can_add_tag_to_public_registration
with assert_latest_log(NodeLog.TAG_ADDED, registration_public):
res = app.patch_json_api(
url_registration_public,
new_tag_payload_public,
auth=user_admin.auth)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
registration_public.reload()
assert registration_public.tags.count() == 1
assert registration_public.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_registration_public)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
# test_contributor_can_add_tag_to_private_registration
with assert_latest_log(NodeLog.TAG_ADDED, registration_private):
res = app.patch_json_api(
url_registration_private,
new_tag_payload_private,
auth=user_admin.auth)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
registration_private.reload()
assert registration_private.tags.count() == 1
assert registration_private.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(
url_registration_private,
auth=user_admin.auth)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
# test_non_contributor_cannot_add_tag_to_registration
res = app.patch_json_api(
url_registration_public,
new_tag_payload_public,
expect_errors=True,
auth=user_non_contrib.auth)
assert res.status_code == 403
# test_partial_update_registration_does_not_clear_tags
new_payload = {
'data': {
'id': registration_private._id,
'type': 'registrations',
'attributes': {
'public': True
}
}
}
res = app.patch_json_api(
url_registration_private,
new_payload,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
def test_tags_add_and_remove_properly(
self, app, user_admin, registration_public,
new_tag_payload_public, url_registration_public):
with assert_latest_log(NodeLog.TAG_ADDED, registration_public):
res = app.patch_json_api(
url_registration_public,
new_tag_payload_public,
auth=user_admin.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, registration_public), assert_latest_log(NodeLog.TAG_ADDED, registration_public, 1):
# Ensure removing and adding tag data is correct from the PATCH
# response
res = app.patch_json_api(
url_registration_public,
{
'data': {
'id': registration_public._id,
'type': 'registrations',
'attributes': {'tags': ['newer-tag']}
}
}, auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, registration_public):
# Ensure removing tag data is correct from the PATCH response
res = app.patch_json_api(
url_registration_public,
{
'data': {
'id': registration_public._id,
'type': 'registrations',
'attributes': {'tags': []}
}
}, auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_tags_for_withdrawn_registration(
self, app, registration_withdrawn, user_admin,
url_registration_withdrawn, new_tag_payload_withdrawn):
res = app.patch_json_api(
url_registration_withdrawn,
new_tag_payload_withdrawn,
auth=user_admin.auth,
expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'Cannot add tags to withdrawn registrations.'
res = app.patch_json_api(
url_registration_withdrawn,
{
'data': {
'id': registration_withdrawn._id,
'type': 'registrations',
'attributes': {'tags': []}
}
},
auth=user_admin.auth,
expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'Cannot remove tags of withdrawn registrations.'
|
|
#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for diff'ing two versions of the DB schema.
Each release cycle the plan is to compact all of the migrations from that
release into a single file. This is a manual and, unfortunately, error-prone
process. To ensure that the schema doesn't change, this tool can be used to
diff the compacted DB schema to the original, uncompacted form.
The schema versions are specified by providing a git ref (a branch name or
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
./tools/db/schema_diff.py mysql master:latest my_branch:82
"""
from __future__ import print_function
import datetime
import glob
import os
import subprocess
import sys
from nova.openstack.common.gettextutils import _
### Dump
def dump_db(db_driver, db_name, migration_version, dump_filename):
db_driver.create(db_name)
try:
migrate(db_driver, db_name, migration_version)
db_driver.dump(db_name, dump_filename)
finally:
db_driver.drop(db_name)
### Diff
def diff_files(filename1, filename2):
pipeline = ['diff -U 3 %(filename1)s %(filename2)s' % locals()]
# Use colordiff if available
if subprocess.call(['which', 'colordiff']) == 0:
pipeline.append('colordiff')
pipeline.append('less -R')
cmd = ' | '.join(pipeline)
subprocess.check_call(cmd, shell=True)
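# For reference (assuming colordiff is installed), the shell pipeline assembled
# above ends up looking roughly like:
#   diff -U 3 orig_db_<timestamp>.dump new_db_<timestamp>.dump | colordiff | less -R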
### Database
class MySQL(object):
def create(self, name):
subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])
def drop(self, name):
subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])
def dump(self, name, dump_filename):
subprocess.check_call(
'mysqldump -u root %(name)s > %(dump_filename)s' % locals(),
shell=True)
def url(self, name):
return 'mysql://root@localhost/%s' % name
class Postgres(object):
def create(self, name):
subprocess.check_call(['createdb', name])
def drop(self, name):
subprocess.check_call(['dropdb', name])
def dump(self, name, dump_filename):
subprocess.check_call(
'pg_dump %(name)s > %(dump_filename)s' % locals(),
shell=True)
def url(self, name):
return 'postgresql://localhost/%s' % name
def _get_db_driver_class(db_type):
if db_type == "mysql":
return MySQL
elif db_type == "postgres":
return Postgres
else:
raise Exception(_("database %s not supported") % db_type)
### Migrate
MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo")
def migrate(db_driver, db_name, migration_version):
earliest_version = _migrate_get_earliest_version()
# NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
# migration numbers.
_migrate_cmd(
db_driver, db_name, 'version_control', str(earliest_version - 1))
upgrade_cmd = ['upgrade']
if migration_version != 'latest':
upgrade_cmd.append(str(migration_version))
_migrate_cmd(db_driver, db_name, *upgrade_cmd)
def _migrate_cmd(db_driver, db_name, *cmd):
manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
args = ['python', manage_py]
args += cmd
args += ['--repository=%s' % MIGRATE_REPO,
'--url=%s' % db_driver.url(db_name)]
subprocess.check_call(args)
def _migrate_get_earliest_version():
versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
versions = []
for path in glob.iglob(versions_glob):
filename = os.path.basename(path)
prefix = filename.split('_', 1)[0]
try:
version = int(prefix)
except ValueError:
continue
versions.append(version)
versions.sort()
return versions[0]
### Git
def git_current_branch_name():
ref_name = git_symbolic_ref('HEAD', quiet=True)
current_branch_name = ref_name.replace('refs/heads/', '')
return current_branch_name
def git_symbolic_ref(ref, quiet=False):
args = ['git', 'symbolic-ref', ref]
if quiet:
args.append('-q')
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
return stdout.strip()
def git_checkout(branch_name):
subprocess.check_call(['git', 'checkout', branch_name])
def git_has_uncommited_changes():
return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
### Command
def die(msg):
print("ERROR: %s" % msg, file=sys.stderr)
sys.exit(1)
def usage(msg=None):
if msg:
print("ERROR: %s" % msg, file=sys.stderr)
prog = "schema_diff.py"
args = ["<mysql|postgres>", "<orig-branch:orig-version>",
"<new-branch:new-version>"]
print("usage: %s %s" % (prog, ' '.join(args)), file=sys.stderr)
sys.exit(1)
def parse_options():
try:
db_type = sys.argv[1]
except IndexError:
usage("must specify DB type")
try:
orig_branch, orig_version = sys.argv[2].split(':')
except (IndexError, ValueError):
usage('original branch and version required (e.g. master:82)')
try:
new_branch, new_version = sys.argv[3].split(':')
except (IndexError, ValueError):
usage('new branch and version required (e.g. master:82)')
return db_type, orig_branch, orig_version, new_branch, new_version
def main():
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
ORIG_DB = 'orig_db_%s' % timestamp
NEW_DB = 'new_db_%s' % timestamp
ORIG_DUMP = ORIG_DB + ".dump"
NEW_DUMP = NEW_DB + ".dump"
options = parse_options()
db_type, orig_branch, orig_version, new_branch, new_version = options
# Since we're going to be switching branches, ensure user doesn't have any
# uncommitted changes
if git_has_uncommited_changes():
die("You have uncommited changes. Please commit them before running "
"this command.")
db_driver = _get_db_driver_class(db_type)()
users_branch = git_current_branch_name()
git_checkout(orig_branch)
try:
# Dump Original Schema
dump_db(db_driver, ORIG_DB, orig_version, ORIG_DUMP)
# Dump New Schema
git_checkout(new_branch)
dump_db(db_driver, NEW_DB, new_version, NEW_DUMP)
diff_files(ORIG_DUMP, NEW_DUMP)
finally:
git_checkout(users_branch)
if os.path.exists(ORIG_DUMP):
os.unlink(ORIG_DUMP)
if os.path.exists(NEW_DUMP):
os.unlink(NEW_DUMP)
if __name__ == "__main__":
main()
|
|
import logging
import json
import textwrap
from json.encoder import JSONEncoder
from logging import StreamHandler, Formatter, FileHandler
from ethereum.utils import bcolors, is_numeric
import sys
DEFAULT_LOGLEVEL = 'INFO'
JSON_FORMAT = '%(message)s'
PRINT_FORMAT = '%(levelname)s:%(name)s\t%(message)s'
FILE_PREFIX = '%(asctime)s'
TRACE = 5
known_loggers = set()
log_listeners = []
def _inject_into_logger(name, code, namespace=None):
# This is a hack to fool the logging module into reporting correct source files.
# It determines the actual source of a logging call by inspecting the stack frame's
# source file. So we use this `eval(compile())` construct to "inject" our additional
# methods into the logging module.
if namespace is None:
namespace = {}
eval(
compile(
code,
logging._srcfile,
'exec'
),
namespace
)
setattr(logging.Logger, name, namespace[name])
# Add `trace()` level to Logger
_inject_into_logger(
'trace',
textwrap.dedent(
"""\
def trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
"""
),
{'TRACE': TRACE}
)
logging.TRACE = TRACE
logging.addLevelName(TRACE, "TRACE")
# Add `DEV()` shortcut to loggers
_inject_into_logger(
'DEV',
textwrap.dedent(
"""\
def DEV(self, msg, *args, **kwargs):
'''Shortcut to output highlighted log text'''
kwargs['highlight'] = True
self.critical(msg, *args, **kwargs)
"""
)
)
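# Usage sketch (illustrative only): once the methods above are injected, any
# logger obtained through get_logger() exposes them, e.g.
#   log = get_logger('eth.example')           # 'eth.example' is a made-up name
#   log.trace('applying tx', tx_hash='0x..')  # emitted only when TRACE is enabled
#   log.DEV('inspect this')                   # highlighted critical-level output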
class LogRecorder(object):
"""
temporarily records all logs, w/o level filtering
use only once!
"""
max_capacity = 1000 * 1000 # check we are not forgotten or abused
def __init__(self, disable_other_handlers=False, log_config=None):
self._records = []
log_listeners.append(self._add_log_record)
self._saved_config = None
if log_config:
self._saved_config = get_configuration()
configure(log_config)
self._saved_handlers = []
if disable_other_handlers:
self._saved_handlers = rootLogger.handlers[:]
rootLogger.handlers = []
def pop_records(self):
# only returns records on the first call
r = self._records[:]
self._records = []
try:
log_listeners.remove(self._add_log_record)
except ValueError:
pass
if self._saved_config:
configure(**self._saved_config)
self._saved_config = None
if self._saved_handlers:
rootLogger.handlers = self._saved_handlers[:]
self._saved_handlers = []
return r
def _add_log_record(self, msg):
self._records.append(msg)
assert len(self._records) < self.max_capacity
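# Usage sketch for LogRecorder (assumed typical flow; use a fresh instance each
# time, as noted in the docstring):
#   recorder = LogRecorder()
#   ...                               # run code that logs
#   records = recorder.pop_records()  # list of dicts: {'event': ..., **kwargs}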
def get_configuration():
"""
get a configuration (snapshot) that can be used to call configure
snapshot = get_configuration()
configure(**snapshot)
"""
root = getLogger()
name_levels = [('', logging.getLevelName(root.level))]
name_levels.extend(
(name, logging.getLevelName(logger.level))
for name, logger
in root.manager.loggerDict.items()
if hasattr(logger, 'level')
)
config_string = ','.join('%s:%s' % x for x in name_levels)
return dict(config_string=config_string, log_json=SLogger.manager.log_json)
def get_logger_names():
return sorted(known_loggers, key=lambda x: '' if not x else x)
class BoundLogger(object):
def __init__(self, logger, context):
self.logger = logger
self.context = context
def bind(self, **kwargs):
return BoundLogger(self, kwargs)
def _proxy(self, method_name, *args, **kwargs):
context = self.context.copy()
context.update(kwargs)
return getattr(self.logger, method_name)(*args, **context)
trace = lambda self, *args, **kwargs: self._proxy('trace', *args, **kwargs)
debug = lambda self, *args, **kwargs: self._proxy('debug', *args, **kwargs)
info = lambda self, *args, **kwargs: self._proxy('info', *args, **kwargs)
warn = warning = lambda self, * \
args, **kwargs: self._proxy('warning', *args, **kwargs)
error = lambda self, *args, **kwargs: self._proxy('error', *args, **kwargs)
exception = lambda self, * \
args, **kwargs: self._proxy('exception', *args, **kwargs)
fatal = critical = lambda self, * \
args, **kwargs: self._proxy('critical', *args, **kwargs)
class _LogJSONEncoder(JSONEncoder):
def default(self, o):
return repr(o)
class SLogger(logging.Logger):
def __init__(self, name, level=DEFAULT_LOGLEVEL):
self.warn = self.warning
super(SLogger, self).__init__(name, level=level)
@property
def log_json(self):
return SLogger.manager.log_json
def is_active(self, level_name='trace'):
return self.isEnabledFor(logging._checkLevel(level_name.upper()))
def format_message(self, msg, kwargs, highlight, level):
if getattr(self, 'log_json', False):
message = dict()
message['event'] = '{}.{}'.format(
self.name, msg.lower().replace(' ', '_'))
message['level'] = logging.getLevelName(level)
try:
message.update(kwargs)
try:
msg = json.dumps(message, cls=_LogJSONEncoder)
except TypeError:
# Invalid value. With our custom encoder this can only happen with non-string
# dict keys (see: https://bugs.python.org/issue18820).
message = _stringify_dict_keys(message)
msg = json.dumps(message, cls=_LogJSONEncoder)
except UnicodeDecodeError:
message.update({
k: v if is_numeric(v) or isinstance(v, (float, complex)) else repr(v)
for k, v in kwargs.items()
})
msg = json.dumps(message, cls=_LogJSONEncoder)
else:
msg = "{}{} {}{}".format(
bcolors.WARNING if highlight else "",
msg,
" ".join("{}={!s}".format(k, v) for k, v in kwargs.items()),
bcolors.ENDC if highlight else ""
)
return msg
def bind(self, **kwargs):
return BoundLogger(self, kwargs)
def _log(self, level, msg, args, **kwargs):
exc_info = kwargs.pop('exc_info', None)
extra = kwargs.pop('extra', {})
highlight = kwargs.pop('highlight', False)
extra['kwargs'] = kwargs
extra['original_msg'] = msg
msg = self.format_message(msg, kwargs, highlight, level)
super(SLogger, self)._log(level, msg, args, exc_info, extra)
class RootLogger(SLogger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
super(RootLogger, self).__init__("root", level)
def handle(self, record):
if log_listeners:
rec_dict = getattr(record, 'kwargs', {}).copy()
rec_dict['event'] = getattr(record, 'original_msg', "")
for listener in log_listeners:
listener(rec_dict)
super(RootLogger, self).handle(record)
class SManager(logging.Manager):
def __init__(self, rootnode):
self.loggerClass = SLogger
self.log_json = False
super(SManager, self).__init__(rootnode)
def getLogger(self, name):
logging.setLoggerClass(SLogger)
return super(SManager, self).getLogger(name)
rootLogger = RootLogger(DEFAULT_LOGLEVEL)
SLogger.root = rootLogger
SLogger.manager = SManager(SLogger.root)
def _stringify_dict_keys(input_):
if isinstance(input_, dict):
res = {}
for k, v in input_.items():
v = _stringify_dict_keys(v)
if not isinstance(k, (int, long, bool, None.__class__)):
k = str(k)
res[k] = v
elif isinstance(input_, (list, tuple)):
res = input_.__class__([_stringify_dict_keys(i) for i in input_])
else:
res = input_
return res
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
logger = SLogger.manager.getLogger(name)
return logger
else:
return rootLogger
def configure(config_string=None, log_json=False, log_file=None):
if not config_string:
config_string = ":{}".format(DEFAULT_LOGLEVEL)
if log_json:
SLogger.manager.log_json = True
log_format = JSON_FORMAT
else:
SLogger.manager.log_json = False
log_format = PRINT_FORMAT
if len(rootLogger.handlers) == 0:
#handler = StreamHandler()
handler = StreamHandler(sys.stdout)
formatter = Formatter(log_format)
handler.setFormatter(formatter)
rootLogger.addHandler(handler)
if log_file:
if not any(isinstance(hndlr, FileHandler)
for hndlr in rootLogger.handlers):
handler = FileHandler(log_file)
formatter = Formatter("{} {}".format(FILE_PREFIX, log_format))
handler.setFormatter(formatter)
rootLogger.addHandler(handler)
# Reset logging levels before applying new config below
for name, logger in SLogger.manager.loggerDict.items():
if hasattr(logger, 'setLevel'):
# Guard against `logging.PlaceHolder` instances
logger.setLevel(logging.NOTSET)
if config_string == ":{}".format(DEFAULT_LOGLEVEL):
logger.propagate = True
else:
logger.propagate = True
for name_levels in config_string.split(','):
name, _, level = name_levels.partition(':')
logger = getLogger(name)
logger.setLevel(level.upper())
logger.propagate = True
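# Example (sketch): set the root logger to INFO and a sub-logger to TRACE, emit
# JSON lines and also append to a file (the logger name and path are illustrative):
#   configure(config_string=':INFO,eth.vm:TRACE', log_json=True, log_file='/tmp/eth.log')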
configure_logging = configure
def set_level(name, level):
assert not isinstance(level, int)
logger = getLogger(name)
logger.setLevel(getattr(logging, level.upper()))
def get_logger(name=None):
known_loggers.add(name)
return getLogger(name)
def DEBUG(msg, *args, **kwargs):
"""temporary logger during development that is always on"""
logger = getLogger("DEBUG")
if len(logger.handlers) == 0:
logger.addHandler(StreamHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
logger.DEV(msg, *args, **kwargs)
|
|
#!/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# $Id: gen_junit_report.py 1141953 2011-07-01 14:42:56Z rhuijben $
"""
gen_junit_report.py -- Generate the junit report for Subversion tests.
The script uses the log file, tests.log, created by the "make check"
process. It parses the log file and generates the junit files for each
test separately in the specified output directory. The script can take
--log-file and --output-dir arguments.
"""
import sys
import os
import getopt
def replace_from_map(data, encode):
"""replace substrings in DATA with replacements defined in ENCODING"""
for pattern, replacement in encode.items():
data = data.replace(pattern, replacement)
return data
xml_encode_map = {
'&': '&',
'<': '<',
'>': '>',
'"': '"',
"'": ''',
}
def xml_encode(data):
"""encode the xml characters in the data"""
return replace_from_map(data, xml_encode_map)
special_encode_map = {
']]>': ']]]]><![CDATA[>', # CDATA terminator sequence
'\000': '␀', # U+2400 SYMBOL FOR NULL
'\001': '␁', # U+2401 SYMBOL FOR START OF HEADING
'\002': '␂', # U+2402 SYMBOL FOR START OF TEXT
'\003': '␃', # U+2403 SYMBOL FOR END OF TEXT
'\004': '␄', # U+2404 SYMBOL FOR END OF TRANSMISSION
'\005': '␅', # U+2405 SYMBOL FOR ENQUIRY
'\006': '␆', # U+2406 SYMBOL FOR ACKNOWLEDGE
'\007': '␇', # U+2407 SYMBOL FOR BELL
'\010': '␈', # U+2408 SYMBOL FOR BACKSPACE
'\011': '␉', # U+2409 SYMBOL FOR HORIZONTAL TABULATION
#'\012': '␊', # U+240A SYMBOL FOR LINE FEED
'\013': '␋', # U+240B SYMBOL FOR VERTICAL TABULATION
'\014': '␌', # U+240C SYMBOL FOR FORM FEED
#'\015': '␍', # U+240D SYMBOL FOR CARRIAGE RETURN
'\016': '␎', # U+240E SYMBOL FOR SHIFT OUT
'\017': '␏', # U+240F SYMBOL FOR SHIFT IN
'\020': '␐', # U+2410 SYMBOL FOR DATA LINK ESCAPE
'\021': '␑', # U+2411 SYMBOL FOR DEVICE CONTROL ONE
'\022': '␒', # U+2412 SYMBOL FOR DEVICE CONTROL TWO
'\023': '␓', # U+2413 SYMBOL FOR DEVICE CONTROL THREE
'\024': '␔', # U+2414 SYMBOL FOR DEVICE CONTROL FOUR
'\025': '␕', # U+2415 SYMBOL FOR NEGATIVE ACKNOWLEDGE
'\026': '␖', # U+2416 SYMBOL FOR SYNCHRONOUS IDLE
'\027': '␗', # U+2417 SYMBOL FOR END OF TRANSMISSION BLOCK
'\030': '␘', # U+2418 SYMBOL FOR CANCEL
'\031': '␙', # U+2419 SYMBOL FOR END OF MEDIUM
'\032': '␚', # U+241A SYMBOL FOR SUBSTITUTE
'\033': '␛', # U+241B SYMBOL FOR ESCAPE
'\034': '␜', # U+241C SYMBOL FOR FILE SEPARATOR
'\035': '␝', # U+241D SYMBOL FOR GROUP SEPARATOR
'\036': '␞', # U+241E SYMBOL FOR RECORD SEPARATOR
'\037': '␟', # U+241F SYMBOL FOR UNIT SEPARATOR
}
def escape_special_characters(data):
"""remove special characters in test failure reasons"""
if data:
data = replace_from_map(data, special_encode_map)
return data
def start_junit():
"""define the beginning of xml document"""
head = """<?xml version="1.0" encoding="UTF-8"?>"""
return head
def start_testsuite(test_name):
"""start testsuite. The value for the attributes are replaced later
when the junit file handling is concluded"""
sub_test_name = test_name.replace('.', '-')
start = """<testsuite time="ELAPSED_%s" tests="TOTAL_%s" name="%s"
failures="FAIL_%s" errors="FAIL_%s" skipped="SKIP_%s">""" % \
(test_name, test_name, sub_test_name, test_name, test_name, test_name)
return start
def junit_testcase_ok(test_name, casename):
"""mark the test case as PASSED"""
casename = xml_encode(casename)
sub_test_name = test_name.replace('.', '-')
case = """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s"/>""" % \
(test_name, casename, sub_test_name)
return case
def junit_testcase_fail(test_name, casename, reason=None):
"""mark the test case as FAILED"""
casename = xml_encode(casename)
sub_test_name = test_name.replace('.', '-')
reason = escape_special_characters(reason)
case = """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
<failure type="Failed"><![CDATA[%s]]></failure>
</testcase>""" % (test_name, casename, sub_test_name, reason)
return case
def junit_testcase_xfail(test_name, casename, reason=None):
"""mark the test case as XFAILED"""
casename = xml_encode(casename)
sub_test_name = test_name.replace('.', '-')
reason = escape_special_characters(reason)
case = """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
<system-out><![CDATA[%s]]></system-out>
</testcase>""" % (test_name, casename, sub_test_name, reason)
return case
def junit_testcase_skip(test_name, casename):
"""mark the test case as SKIPPED"""
casename = xml_encode(casename)
sub_test_name = test_name.replace('.', '-')
case = """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
<skipped message="Skipped"/>
</testcase>""" % (test_name, casename, sub_test_name)
return case
def end_testsuite():
"""mark the end of testsuite"""
end = """</testsuite>"""
return end
def update_stat(test_name, junit, count):
"""update the test statistics in the junit string"""
junit_str = '\n'.join(junit)
t_count = count[test_name]
total = float(t_count['pass'] + t_count['fail'] + t_count['skip'])
elapsed = float(t_count['elapsed'])
case_time = 0
if total > 0: # there are tests with no test cases
case_time = elapsed/total
total_patt = 'TOTAL_%s' % test_name
fail_patt = 'FAIL_%s' % test_name
skip_patt = 'SKIP_%s' % test_name
elapsed_patt = 'ELAPSED_%s' % test_name
elapsed_case_patt = 'ELAPSED_CASE_%s' % test_name
# replace the pattern in junit string with actual statistics
junit_str = junit_str.replace(total_patt, "%s" % total)
junit_str = junit_str.replace(fail_patt, "%s" % t_count['fail'])
junit_str = junit_str.replace(skip_patt, "%s" % t_count['skip'])
junit_str = junit_str.replace(elapsed_patt, "%.3f" % elapsed)
junit_str = junit_str.replace(elapsed_case_patt, "%.3f" % case_time)
return junit_str
def main():
"""main method"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'l:d:h',
['log-file=', 'output-dir=', 'help'])
except getopt.GetoptError, err:
usage(err)
log_file = None
output_dir = None
for opt, value in opts:
if (opt in ('-h', '--help')):
usage()
elif (opt in ('-l', '--log-file')):
log_file = value
elif (opt in ('-d', '--output-dir')):
output_dir = value
else:
usage('Unable to recognize option')
if not log_file or not output_dir:
usage("The options --log-file and --output-dir are mandatory")
# create junit output directory, if not exists
if not os.path.exists(output_dir):
print("Directory '%s' not exists, creating ..." % output_dir)
try:
os.makedirs(output_dir)
except OSError, err:
sys.stderr.write("ERROR: %s\n" % err)
sys.exit(1)
patterns = {
'start' : 'START:',
'end' : 'END:',
'pass' : 'PASS:',
'skip' : 'SKIP:',
'fail' : 'FAIL:',
'xfail' : 'XFAIL:',
'elapsed' : 'ELAPSED:'
}
junit = []
junit.append(start_junit())
reason = None
count = {}
fp = None
try:
fp = open(log_file, 'r')
except IOError, err:
sys.stderr.write("ERROR: %s\n" % err)
sys.exit(1)
for line in fp.readlines():
line = line.strip()
if line.startswith(patterns['start']):
reason = ""
test_name = line.split(' ')[1]
# replace '.' in test name with '_' to avoid confusing class
# name in test result displayed in the CI user interface
test_name = test_name.replace('.', '_')
count[test_name] = {
'pass' : 0,
'skip' : 0,
'fail' : 0,
'xfail' : 0,
'elapsed' : 0,
'total' : 0
}
junit.append(start_testsuite(test_name))
elif line.startswith(patterns['end']):
junit.append(end_testsuite())
elif line.startswith(patterns['pass']):
reason = ""
casename = line[len(patterns['pass']):].strip()
junit.append(junit_testcase_ok(test_name, casename))
count[test_name]['pass'] += 1
elif line.startswith(patterns['skip']):
reason = ""
casename = line[len(patterns['skip']):].strip()
junit.append(junit_testcase_skip(test_name, casename))
count[test_name]['skip'] += 1
elif line.startswith(patterns['fail']):
casename = line[len(patterns['fail']):].strip()
junit.append(junit_testcase_fail(test_name, casename, reason))
count[test_name]['fail'] += 1
reason = ""
elif line.startswith(patterns['xfail']):
casename = line[len(patterns['xfail']):].strip()
junit.append(junit_testcase_xfail(test_name, casename, reason))
count[test_name]['pass'] += 1
reason = ""
elif line.startswith(patterns['elapsed']):
reason = ""
elapsed = line.split(' ')[2].strip()
(hrs, mins, secs) = elapsed.split(':')
secs_taken = int(hrs)*3600 + int(mins)*60 + float(secs)
count[test_name]['elapsed'] = secs_taken
junit_str = update_stat(test_name, junit, count)
test_junit_file = os.path.join(output_dir,
"%s.junit.xml" % test_name)
w_fp = open (test_junit_file, 'w')
w_fp.writelines(junit_str)
w_fp.close()
junit = []
elif len(line):
reason = "%s\n%s" % (reason, line)
fp.close()
def usage(errorMsg=None):
script_name = os.path.basename(sys.argv[0])
sys.stdout.write("""USAGE: %s: [--help|h] --log-file|l --output-dir|d
Options:
--help|-h Display help message
--log-file|l The log file to parse for generating junit xml files
--output-dir|d The directory to create the junit xml file for each
test
""" % script_name)
if errorMsg is not None:
sys.stderr.write("\nERROR: %s\n" % errorMsg)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
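# Usage sketch (credentials and port are illustrative; this mirrors how
# Miner.loop() below drives the class):
#   rpc = BitcoinRPC('127.0.0.1', 8332, 'rpcuser', 'rpcpass')
#   print rpc.getblockcount()
#   work = rpc.getwork()        # dict with 'data' and 'target' hex strings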
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
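# For reference (behaviour sketch): bufreverse() byte-swaps every 32-bit word,
# while wordreverse() reverses the order of the 32-bit words, e.g. for 8 bytes:
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')  == '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'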
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 5889
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
import traceback
import pyquery
from scrapy.conf import settings
from scrapy.spiders import CrawlSpider, Rule
from scrapy.exceptions import DropItem
from scrapy.link import Link
from src.items import NewsItem
from src.utils.database.quarantine import get_quarantine_database
class NewsSpider(CrawlSpider):
"""
Base News Spider which every spider should inherit from.
"""
def __init__(self):
super(NewsSpider, self).__init__()
if not hasattr(self, 'name'):
raise ValueError('name required')
if not hasattr(self, 'publisher'):
raise ValueError('publisher required')
if not hasattr(self, 'allowed_domains'):
raise ValueError('allowed_domains required')
self.log_exceptions = 0
self.codes = {}
self.scraped = 0
# reference to pyquery instance
self.pq = None
# default evaluation order for normal fields
self.field_order = [
('link', self.item_link),
('code', self.item_code),
('title', self.item_title),
('content', self.item_content),
('category', self.item_category),
('source', self.item_source),
('date_published', self.item_date_published),
('comments', self.item_comments),
('images', self.item_images),
('publisher', self.item_publisher)
]
# override crawling rules with only one URL?
if hasattr(self, 'crawl_only_url') and settings.get('DEBUG'):
class CustomLink(object):
def __init__(self, url):
self.url = url
def extract_links(self, response):
return [Link(url=self.url)]
self.rules = (
Rule(CustomLink(self.crawl_only_url),
callback='parse_item_wrapper'),
)
self._compile_rules()
def init_pyquery(self, response):
if response.body:
return pyquery.PyQuery(response.body)
return None
def parse_item(self, response):
pq = self.init_pyquery(response)
if pq:
self.pq = pq.clone()
# if the deriving class inherits from a mixin then we initialise
# the mixin in that way
if hasattr(self, 'init_mixin_response'):
self.init_mixin_response(response)
item = NewsItem()
exception = None
try:
for field_name, field_method in self.field_order:
item[field_name] = field_method(response)
yield item
except DropItem as exception:
# first parse all the variations and raise the Drop
# exceptions after
pass
# override to ensure consistency
self.pq = pq
if exception:
raise exception
def init_request(self):
self.log('Initialized NewsSpider')
return self.initialized()
def parse_item_wrapper(self, response):
"""Wrapper for parse_item enabling exception notifications."""
try:
item = self.parse_item(response)
return item
except Exception, ex:
url = None
if response.url:
url = response.url
quarantine_database = get_quarantine_database()
if quarantine_database and settings.get('QUARANTINE_MODE'):
e = {
'exception': str(type(ex)),
'stacktrace': traceback.format_exc(),
'link': url
}
quarantine_database.save_exception(e)
if settings.get('DEBUG'):
self.log('Spider Exception trying to parse: ' + url)
self.log(str(type(ex)) + " - " + traceback.format_exc())
if not isinstance(ex, DropItem):
self.log_exceptions += 1
raise
finally:
self.scraped += 1
def start_requests(self):
self._init()
return super(NewsSpider, self).start_requests()
def _init(self):
"""
Initializes spider.
"""
self.scraped = 0
def _conditional_override(self, method_name, *args, **kwargs):
"""
Checks whether `method_name` exists in a super class of
the current class. If so
call it with the given arguments in `*args`.
If the method does not exist return None.
If a child class inherits from another class which
implements those methods then it will use those instead.
"""
if not hasattr(super(NewsSpider, self), method_name):
return None
else:
method = getattr(super(NewsSpider, self), method_name)
return method(*args, **kwargs)
def item_link(self, response):
return response.url
def item_code(self, response):
"""Stub implementation of item_code."""
return self._conditional_override('item_code', response)
def item_title(self, response):
"""Stub implementation of item_title."""
return self._conditional_override('item_title', response)
def item_content(self, response):
"""Stub implementation of item_content."""
return self._conditional_override('item_content', response)
def item_category(self, response):
"""Stub implementation of item_category."""
return self._conditional_override('item_category', response)
def item_source(self, response):
"""Stub implementation of item_source."""
return self._conditional_override('item_source', response)
def item_date_published(self, response):
"""Stub implementation of item_date_published."""
return self._conditional_override('item_date_published', response)
def item_comments(self, response):
"""Stub implementation of item_comments."""
return self._conditional_override('item_comments', response)
def item_images(self, response):
"""Stub implementation of item_images."""
return self._conditional_override('item_images', response)
def item_publisher(self, response):
"""Stub implementation of item_publisher."""
publisher = self._conditional_override('item_publisher', response)
if publisher:
return publisher
return self.publisher
def build_item_code(self, product_code):
"""Used to populate self.codes."""
return ('{0}'.format(product_code)).lower()
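# Example subclass (sketch; the class, publisher and selector below are
# illustrative and not part of this project):
#   class ExampleNewsSpider(NewsSpider):
#       name = 'example'
#       publisher = 'Example Publisher'
#       allowed_domains = ['example.com']
#
#       def item_title(self, response):
#           # self.pq is initialised by parse_item() before the field methods run
#           return self.pq('h1.headline').text()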
|
|
#!/usr/bin/env python
import string
from kuralib import lngapp
class App:
def __init__(self):
self.tables = {}
def addDef(self, **args):
for (tableName, tableDef) in args.items():
#
# Store tableName/table definition
#
tableDef.name=tableName
self.tables[tableName]=tableDef
#
# updating field labels that can be calculated
#
for (fieldname, field) in tableDef.orderedFieldList():
if field.label==None:
if fieldname[-2:]=="nr":
field.label=fieldname[:-2]
field.label=string.replace(field.label or fieldname, "_", " ")
field.label=field.label.capitalize()
def tableType(type):
if type == 0:
return "numeric primary key"
elif type == 1:
return "Recursive table with a numeric primary key"
elif type == 2:
return "Numeric primary key and a sequential counter"
elif type == 3:
return "Code table with a textual primary key"
elif type == 4:
return "System code table with a textual primary key"
else:
return "Unknown type"
def dataTypes(type):
if type == 0:
return "Integer"
elif type == 1:
return "String"
elif type == 2:
return "Text"
elif type == 3:
return "Boolean"
elif type == 4:
return "Date/time"
def buildFieldDef(fieldDef):
if fieldDef.pk:
return "%s, %i (pk)" % (dataTypes(fieldDef.datatype), fieldDef.length)
elif fieldDef.sequence:
return "%s, %i (seq)" % (dataTypes(fieldDef.datatype), fieldDef.length)
elif fieldDef.nullable:
return "%s, %i (null allowed)" % (dataTypes(fieldDef.datatype), fieldDef.length)
elif not fieldDef.owner:
return "%s, %i (lookup)" % (dataTypes(fieldDef.datatype), fieldDef.length)
def buildFields(tableDef):
r = [fieldHeader]
for field, fieldDef in tableDef.fields.items():
r.append(fieldRow % {"field": field,
"definition": buildFieldDef(fieldDef)})
r.append("</tbody></informaltable>")
return "\n".join(r)
def buildLookupTables(table, app):
if len(table.lookuptables) == 0:
return ""
r = ["""
<refsect1><title>Lookup tables (parents)</title>
<informaltable><tgroup cols="5">
<thead>
<row>
<entry>Name</entry>
<entry>Keypair</entry>
<entry>Descriptors</entry>
<entry>Related table</entry>
<entry>Related alias</entry></row>
<tbody>"""]
for table in table.lookuptables:
rel = app.relations[table]
r.append(""" <row>
<entry>%s</entry>
<entry>%s</entry>
<entry>%s</entry>
<entry><link linkend="%s">%s</link></entry>
<entry>%s</entry>
</row>""" %
(rel.name,
unicode(rel.keys),
unicode(rel.descriptors),
rel.rtable, rel.rtable,
rel.ralias))
r.append("""</tbody></tgroup></informaltable></refsect1>
""")
return "\n".join(r)
def buildChildRelations(table):
if len(table.childtables.keys()) == 0:
return ""
r = ["""<refsect1><title>Lookup tables (parents)</title>
<informaltable><tgroup cols="3">
<thead>
<row>
<entry>Childtable</entry>
<entry>Local key</entry>
<entry>Foreign key</entry>
</row>
<tbody>"""]
for child in table.childtables.values():
r.append(""" <row>
<entry><link linkend="%s">%s</link></entry>
<entry>%s</entry>
<entry>%s</entry>
</row>""" %
(child.childTable,
child.childTable,
child.keys.local,
child.keys.foreign))
r.append("""</tbody></tgroup></informaltable>
</refsect1>""")
return "\n".join(r)
def main():
app = App()
lngapp.setRepository(app)
keys = app.tables.keys()
keys.sort()
for key in keys:
table = app.tables[key]
print tableEntry % {"tablename": table.name,
"comment": table.comment,
"tabletype": tableType(table.tabletype),
"alias": table.alias,
"primarykey": table.primarykey,
"sequencebase": table.sequencebase,
"hint": table.hint,
"descriptors": ", ".join(table.descriptors),
"fieldorder": ", ".join(table.fieldOrder),
"fields": buildFields(table),
"indexes": ", ".join(table.indexes),
"uniqueIndexes": ", ".join(table.unique_indexes),
"childtables": buildChildRelations(table),
"lookuptables": buildLookupTables(table, app)}
fieldHeader = """
<informaltable>
<tgroup cols="2">
<thead>
<row><entry>Fieldname</entry><entry>Field definition</entry></row>
</thead>
<tbody>"""
fieldRow = """<row><entry>%(field)s</entry><entry>%(definition)s</entry></row>"""
tableEntry = """<refentry id="%(tablename)s">
<refmeta>
<refentrytitle>%(tablename)s</refentrytitle>
</refmeta>
<refnamediv>
<refname>%(tablename)s</refname>
<refpurpose>%(comment)s</refpurpose>
</refnamediv>
<refsect1>
<title>Attributes</title>
<informaltable>
<tgroup cols="2">
<tbody>
<row>
<entry>Table type</entry>
<entry>%(tabletype)s</entry>
</row>
<row>
<entry>Table alias</entry>
<entry>%(alias)s</entry>
</row>
<row>
<entry>Primary key field</entry>
<entry>%(primarykey)s</entry>
</row>
<row>
<entry>Sequence field</entry>
<entry>%(sequencebase)s</entry>
</row>
<row>
<entry>Hint</entry>
<entry>%(hint)s</entry>
</row>
<row>
<entry>Table described by</entry>
<entry>%(descriptors)s</entry>
</row>
<row>
<entry>Fieldorder</entry>
<entry>%(fieldorder)s</entry>
</row>
<row>
<entry>Indexes</entry>
<entry>%(indexes)s</entry>
</row>
<row>
<entry>Unique Indexes</entry>
<entry>%(uniqueIndexes)s</entry>
</row>
</tbody>
</tgroup>
</informaltable>
</refsect1>
<refsect1><title>Fields</title>
%(fields)s
</refsect1>
%(childtables)s
%(lookuptables)s
</refentry>
"""
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test objects for interacting with a bitcoind node over the p2p protocol.
The P2PInterface objects interact with the bitcoind nodes under test using the
node's p2p interface. They can be used to send messages to the node, and
callbacks can be registered that execute when messages are received from the
node. Messages are sent to/received from the node on an asyncio event loop.
State held inside the objects must be guarded by the p2p_lock to avoid data
races between the main testing thread and the event loop.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages
P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
a count of how many times each txid has been announced."""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MAX_HEADERS_RESULTS,
msg_addr,
msg_addrv2,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cfcheckpt,
msg_cfheaders,
msg_cfilter,
msg_cmpctblock,
msg_feefilter,
msg_filteradd,
msg_filterclear,
msg_filterload,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_merkleblock,
msg_notfound,
msg_ping,
msg_pong,
msg_sendaddrv2,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
MSG_WTX,
msg_wtxidrelay,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import (
MAX_NODES,
p2p_port,
wait_until_helper,
)
logger = logging.getLogger("TestFramework.p2p")
# The minimum P2P version that this test framework supports
MIN_P2P_VERSION_SUPPORTED = 60001
# The P2P version that this test framework implements and sends in its `version` message
# Version 70016 supports wtxid relay
P2P_VERSION = 70016
# The services that this test framework offers in its `version` message
P2P_SERVICES = NODE_NETWORK | NODE_WITNESS
# The P2P user agent string that this test framework sends in its `version` message
P2P_SUBVERSION = "/python-p2p-tester:0.0.3/"
# Value for relay that this test framework sends in its `version` message
P2P_VERSION_RELAY = 1
MESSAGEMAP = {
b"addr": msg_addr,
b"addrv2": msg_addrv2,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cfcheckpt": msg_cfcheckpt,
b"cfheaders": msg_cfheaders,
b"cfilter": msg_cfilter,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"filteradd": msg_filteradd,
b"filterclear": msg_filterclear,
b"filterload": msg_filterload,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"merkleblock": msg_merkleblock,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"sendaddrv2": msg_sendaddrv2,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
b"wtxidrelay": msg_wtxidrelay,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
"signet": b"\x0a\x03\xcf\x40", # signet
}
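# Illustrative sketch (not used by the framework itself): the 24-byte wire
# header that build_message() below produces and _on_data() parses is
#   magic (4) + command (12, NUL-padded) + payload length (4, little-endian)
#   + checksum (first 4 bytes of double-SHA256 of the payload).
# The helper computes the double-SHA256 with hashlib directly so it stays
# self-contained; the framework relies on test_framework.messages.sha256.
def _example_wire_header(command, payload, net="regtest"):
    import hashlib
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return (MAGIC_BYTES[net]
            + command + b"\x00" * (12 - len(command))
            + struct.pack("<I", len(payload))
            + checksum)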
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
assert not self.is_connected
self.timeout_factor = timeout_factor
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.magic_bytes = MAGIC_BYTES[net]
def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)
loop = NetworkThread.network_event_loop
logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)
def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor):
self.peer_connect_helper('0', 0, net, timeout_factor)
logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.magic_bytes:
raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if msgtype not in MESSAGEMAP:
raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[msgtype]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
tmsg = self.build_message(message)
self._log_message("send", message)
return self.send_raw_message(tmsg)
def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
def maybe_write():
if not self._transport:
return
if self._transport.is_closing():
return
self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def build_message(self, message):
"""Build a serialized P2P message"""
msgtype = message.msgtype
data = message.serialize()
tmsg = self.magic_bytes
tmsg += msgtype
tmsg += b"\x00" * (12 - len(msgtype))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self, support_addrv2=False, wtxidrelay=True):
super().__init__()
# Track number of messages of each type received.
# Should be read-only in a test.
self.message_count = defaultdict(int)
# Track the most recent message of each type.
# To wait for a message to be received, pop that message from
# this and use self.wait_until.
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
self.support_addrv2 = support_addrv2
# If the peer supports wtxid-relay
self.wtxidrelay = wtxidrelay
def peer_connect_send_version(self, services):
# Send a version msg
vt = msg_version()
vt.nVersion = P2P_VERSION
vt.strSubVer = P2P_SUBVERSION
vt.relay = P2P_VERSION_RELAY
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent in connection_made callback
def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
self.peer_connect_send_version(services)
return create_conn
def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs):
create_conn = super().peer_accept_connection(*args, **kwargs)
self.peer_connect_send_version(services)
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with p2p_lock:
try:
msgtype = message.msgtype.decode('ascii')
self.message_count[msgtype] += 1
self.last_message[msgtype] = message
getattr(self, 'on_' + msgtype)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_addrv2(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cfcheckpt(self, message): pass
def on_cfheaders(self, message): pass
def on_cfilter(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_filteradd(self, message): pass
def on_filterclear(self, message): pass
def on_filterload(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_merkleblock(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_sendaddrv2(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_wtxidrelay(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions {} and above".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED)
if message.nVersion >= 70016 and self.wtxidrelay:
self.send_message(msg_wtxidrelay())
if self.support_addrv2:
self.send_message(msg_sendaddrv2())
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
def test_function():
if check_connected:
assert self.is_connected
return test_function_in()
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
def wait_for_connect(self, timeout=60):
test_function = lambda: self.is_connected
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
self.wait_until(test_function, timeout=timeout)
def wait_for_block(self, blockhash, timeout=60):
def test_function():
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_merkleblock(self, blockhash, timeout=60):
def test_function():
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
return False
return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, hash_list, timeout=60):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
def test_function():
last_data = self.last_message.get("getdata")
if not last_data:
return False
return [x.hash for x in last_data.inv] == hash_list
self.wait_until(test_function, timeout=timeout)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
def test_function():
return self.last_message.get("getheaders")
self.wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
def test_function():
return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
self.wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
def test_function():
return "verack" in self.last_message
self.wait_until(test_function, timeout=timeout)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
def sync_send_with_ping(self, timeout=60):
"""Ensure SendMessages is called on this connection"""
# Calling sync_with_ping twice requires that the node calls
# `ProcessMessage` twice, and thus ensures `SendMessages` must have
# been called at least once
self.sync_with_ping()
self.sync_with_ping()
def sync_with_ping(self, timeout=60):
"""Ensure ProcessMessages is called on this connection"""
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
self.wait_until(test_function, timeout=timeout)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
p2p_lock = threading.Lock()
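# Example (illustrative): test code reading state shared with a P2PInterface
# should hold this lock, e.g.
#   with p2p_lock:
#       inv_count = peer.message_count["inv"]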
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop, so no more than one NetworkThread may be created.
assert not self.network_event_loop
NetworkThread.listeners = {}
NetworkThread.protos = {}
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
# Safe to remove event loop.
NetworkThread.network_event_loop = None
@classmethod
def listen(cls, p2p, callback, port=None, addr=None, idx=1):
""" Ensure a listening server is running on the given port, and run the
protocol specified by `p2p` on the next connection to it. Once ready
for connections, call `callback`."""
if port is None:
assert 0 < idx <= MAX_NODES
port = p2p_port(MAX_NODES - idx)
if addr is None:
addr = '127.0.0.1'
coroutine = cls.create_listen_server(addr, port, callback, p2p)
cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)
@classmethod
async def create_listen_server(cls, addr, port, callback, proto):
def peer_protocol():
"""Returns a function that does the protocol handling for a new
connection. To allow different connections to have different
behaviors, the protocol function is first put in the cls.protos
dict. When the connection is made, the function removes the
protocol function from that dict, and returns it so the event loop
can start executing it."""
response = cls.protos.get((addr, port))
cls.protos[(addr, port)] = None
return response
if (addr, port) not in cls.listeners:
# When creating a listener on a given (addr, port) we only need to
# do it once. If we want different behaviors for different
# connections, we can accomplish this by providing different
# `proto` functions
listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)
logger.debug("Listening server on %s:%d should be started" % (addr, port))
cls.listeners[(addr, port)] = listener
cls.protos[(addr, port)] = proto
callback(addr, port)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
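# (The extended slice above keeps at most MAX_HEADERS_RESULTS headers, those
# closest to the locator, and reverses them into ascending-height order,
# oldest first, which is the order used in a headers message.)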
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with p2p_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
self.wait_until(
lambda: blocks[-1].sha256 in self.getdata_requests,
timeout=timeout,
check_connected=success,
)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with p2p_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
class P2PTxInvStore(P2PInterface):
"""A P2PInterface which stores a count of how many times each txid has been announced."""
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
super().on_inv(message) # Send getdata in response.
# Store how many times invs have been received for each tx.
for i in message.inv:
if (i.type == MSG_TX) or (i.type == MSG_WTX):
# save txid
self.tx_invs_received[i.hash] += 1
def get_invs(self):
with p2p_lock:
return list(self.tx_invs_received.keys())
def wait_for_broadcast(self, txns, timeout=60):
"""Waits for the txns (list of txids) to complete initial broadcast.
The mempool should mark unbroadcast=False for these transactions.
"""
# Wait until invs have been received (and getdatas sent) for each txid.
self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
# Flush messages and wait for the getdatas to be processed
self.sync_with_ping()
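# Usage sketch (illustrative only, not part of this module): a test typically
# attaches one of these interfaces to the node under test. The
# add_p2p_connection() helper is assumed to be provided by the TestNode object
# defined elsewhere in the framework:
#   peer = node.add_p2p_connection(P2PInterface())
#   peer.wait_for_verack()
#   peer.send_and_ping(msg_mempool())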
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for metadata service."""
import base64
import hashlib
import hmac
import re
try:
import cPickle as pickle
except ImportError:
import pickle
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova.api.metadata import password
from nova import block_device
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.neutronv2 import api as neutronapi
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_security_group
from nova.virt import netutils
CONF = cfg.CONF
USER_DATA_STRING = (b"This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
FAKE_SEED = '7qtD24mpMR2'
def fake_inst_obj(context):
inst = objects.Instance(
context=context,
id=1,
user_id='fake_user',
uuid='b65cee2f-8c69-4aeb-be2f-f79742548fc2',
project_id='test',
key_name="key",
key_data="ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
host='test',
launch_index=1,
reservation_id='r-xxxxxxxx',
user_data=ENCODE_USER_DATA_STRING,
image_ref=7,
kernel_id=None,
ramdisk_id=None,
vcpus=1,
fixed_ips=[],
root_device_name='/dev/sda1',
hostname='test.novadomain',
display_name='my_displayname',
metadata={},
default_ephemeral_device=None,
default_swap_device=None,
system_metadata={},
availability_zone=None)
nwinfo = network_model.NetworkInfo([])
inst.info_cache = objects.InstanceInfoCache(context=context,
instance_uuid=inst.uuid,
network_info=nwinfo)
with mock.patch.object(inst, 'save'):
inst.set_flavor(flavors.get_default_flavor())
return inst
def fake_keypair_obj(name, data):
return objects.KeyPair(name=name,
type='fake_type',
public_key=data)
def return_non_existing_address(*args, **kwarg):
raise exception.NotFound()
def fake_InstanceMetadata(stubs, inst_data, address=None,
sgroups=None, content=None, extra_md=None,
vd_driver=None, network_info=None,
network_metadata=None):
content = content or []
extra_md = extra_md or {}
if sgroups is None:
sgroups = [dict(test_security_group.fake_secgroup,
name='default')]
def sg_get(*args, **kwargs):
return sgroups
stubs.Set(api, 'security_group_get_by_instance', sg_get)
return base.InstanceMetadata(inst_data, address=address,
content=content, extra_md=extra_md,
vd_driver=vd_driver, network_info=network_info,
network_metadata=network_metadata)
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
fake_get_metadata=None, headers=None,
fake_get_metadata_by_instance_id=None, app=None):
def get_metadata_by_remote_address(address):
return mdinst
if app is None:
app = handler.MetadataRequestHandler()
if fake_get_metadata is None:
fake_get_metadata = get_metadata_by_remote_address
if stubs:
stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
if fake_get_metadata_by_instance_id:
stubs.Set(app, 'get_metadata_by_instance_id',
fake_get_metadata_by_instance_id)
request = webob.Request.blank(relpath)
request.remote_addr = address
if headers is not None:
request.headers.update(headers)
response = request.get_response(app)
return response
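# Illustrative use (mirroring the test cases below): drive the metadata WSGI
# app through fake_request() and assert on the returned webob response, e.g.
#   response = fake_request(self.stubs, self.mdinst, "/2009-04-04/user-data")
#   self.assertEqual(response.status_int, 200)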
class MetadataTestCase(test.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.keypair = fake_keypair_obj(self.instance.key_name,
self.instance.key_data)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def test_can_pickle_metadata(self):
# Make sure that InstanceMetadata is possible to pickle. This is
# required for memcache backend to work correctly.
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
pickle.dumps(md, protocol=0)
def test_user_data(self):
inst = self.instance.obj_clone()
inst['user_data'] = base64.b64encode("happy")
md = fake_InstanceMetadata(self.stubs, inst)
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
def test_no_user_data(self):
inst = self.instance.obj_clone()
inst.user_data = None
md = fake_InstanceMetadata(self.stubs, inst)
obj = object()
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
obj)
def test_security_groups(self):
inst = self.instance.obj_clone()
sgroups = [dict(test_security_group.fake_secgroup, name='default'),
dict(test_security_group.fake_secgroup, name='other')]
expected = ['default', 'other']
md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['security-groups'], expected)
def test_local_hostname_fqdn(self):
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
# Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = objects.Instance(**{'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None,
'default_ephemeral_device': None,
'default_swap_device': None})
instance_ref1 = objects.Instance(**{'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1',
'default_ephemeral_device': None,
'default_swap_device': None})
def fake_bdm_get(ctxt, uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': True,
'device_name': '/dev/sdh'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdm_get)
expected = {'ami': 'sda1',
'root': '/dev/sda1',
'ephemeral0': '/dev/sdb',
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
conductor_api.LocalAPI()
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref0), block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref1), expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
self.assertEqual(base.ec2_md_print(pubkey_ent),
"0=%s" % self.instance['key_name'])
self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
self.instance['key_data'])
def test_image_type_ramdisk(self):
inst = self.instance.obj_clone()
inst['ramdisk_id'] = 'ari-853667c0'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/latest/meta-data/ramdisk-id")
self.assertIsNotNone(data)
self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
def test_image_type_kernel(self):
inst = self.instance.obj_clone()
inst['kernel_id'] = 'aki-c2e26ff2'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/2009-04-04/meta-data/kernel-id")
self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
self.assertEqual(
md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
def test_image_type_no_kernel_raises(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self.stubs, inst)
self.assertRaises(base.InvalidMetadataPath,
md.lookup, "/2009-04-04/meta-data/kernel-id")
def test_check_version(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self.stubs, inst)
self.assertTrue(md._check_version('1.0', '2009-04-04'))
self.assertFalse(md._check_version('2009-04-04', '1.0'))
self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
def test_InstanceMetadata_uses_passed_network_info(self):
network_info = []
self.mox.StubOutWithMock(netutils, "get_injected_network_template")
netutils.get_injected_network_template(network_info).AndReturn(False)
self.mox.ReplayAll()
base.InstanceMetadata(fake_inst_obj(self.context),
network_info=network_info)
@mock.patch.object(netutils, "get_network_metadata", autospec=True)
def test_InstanceMetadata_gets_network_metadata(self, mock_netutils):
network_data = {'links': [], 'networks': [], 'services': []}
mock_netutils.return_value = network_data
md = base.InstanceMetadata(fake_inst_obj(self.context))
self.assertEqual(network_data, md.network_metadata)
def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
inst_md = base.InstanceMetadata(inst)
for (path, value) in inst_md.metadata_for_config_drive():
self.assertIsNotNone(path)
def test_InstanceMetadata_queries_network_API_when_needed(self):
network_info_from_api = []
self.mox.StubOutWithMock(netutils, "get_injected_network_template")
netutils.get_injected_network_template(
network_info_from_api).AndReturn(False)
self.mox.ReplayAll()
base.InstanceMetadata(fake_inst_obj(self.context))
def test_local_ipv4(self):
nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self.stubs, self.instance,
network_info=nw_info, address="fake")
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(expected_local, data['meta-data']['local-ipv4'])
def test_local_ipv4_from_nw_info(self):
nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self.stubs, self.instance,
network_info=nw_info)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
def test_local_ipv4_from_address(self):
expected_local = "fake"
md = fake_InstanceMetadata(self.stubs, self.instance,
network_info=[], address="fake")
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
@mock.patch.object(base64, 'b64encode', lambda data: FAKE_SEED)
@mock.patch('nova.cells.rpcapi.CellsAPI.get_keypair_at_top')
@mock.patch.object(objects.KeyPair, 'get_by_name')
@mock.patch.object(jsonutils, 'dumps')
def _test_as_json_with_options(self, mock_json_dumps,
mock_keypair, mock_cells_keypair,
is_cells=False, os_version=base.GRIZZLY):
if is_cells:
self.flags(enable=True, group='cells')
self.flags(cell_type='compute', group='cells')
mock_keypair = mock_cells_keypair
instance = self.instance
keypair = self.keypair
md = fake_InstanceMetadata(self.stubs, instance)
expected_metadata = {
'uuid': md.uuid,
'hostname': md._get_hostname(),
'name': instance.display_name,
'launch_index': instance.launch_index,
'availability_zone': md.availability_zone,
}
if md.launch_metadata:
expected_metadata['meta'] = md.launch_metadata
if md.files:
expected_metadata['files'] = md.files
if md.extra_md:
expected_metadata['extra_md'] = md.extra_md
if md.network_config:
expected_metadata['network_config'] = md.network_config
if instance.key_name:
expected_metadata['public_keys'] = {
keypair.name: keypair.public_key
}
expected_metadata['keys'] = [{'type': keypair.type,
'data': keypair.public_key,
'name': keypair.name}]
if md._check_os_version(base.GRIZZLY, os_version):
expected_metadata['random_seed'] = FAKE_SEED
if md._check_os_version(base.LIBERTY, os_version):
expected_metadata['project_id'] = instance.project_id
mock_keypair.return_value = keypair
md._metadata_as_json(os_version, 'non useless path parameter')
if instance.key_name:
mock_keypair.assert_called_once_with(mock.ANY,
instance.user_id,
instance.key_name)
self.assertIsInstance(mock_keypair.call_args[0][0],
context.RequestContext)
self.assertEqual(md.md_mimetype, base.MIME_TYPE_APPLICATION_JSON)
mock_json_dumps.assert_called_once_with(expected_metadata)
def test_as_json(self):
for os_version in base.OPENSTACK_VERSIONS:
self._test_as_json_with_options(os_version=os_version)
def test_as_json_with_cells_mode(self):
for os_version in base.OPENSTACK_VERSIONS:
self._test_as_json_with_options(is_cells=True,
os_version=os_version)
class OpenStackMetadataTestCase(test.TestCase):
def setUp(self):
super(OpenStackMetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def test_top_level_listing(self):
# request for /openstack/ should list the available versions
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
result = mdinst.lookup("/openstack")
# trailing / should not affect anything
self.assertEqual(result, mdinst.lookup("/openstack/"))
# the 'content' should not show up in directory listing
self.assertNotIn(base.CONTENT_DIR, result)
self.assertIn('2012-08-10', result)
self.assertIn('latest', result)
def test_version_content_listing(self):
# request for /openstack/<version>/ should show metadata.json
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
listing = mdinst.lookup("/openstack/2012-08-10")
self.assertIn("meta_data.json", listing)
def test_returns_apis_supported_in_liberty_version(self):
mdinst = fake_InstanceMetadata(self.stubs, self.instance)
liberty_supported_apis = mdinst.lookup("/openstack/2015-10-15")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
base.VD_JSON_NAME, base.NW_JSON_NAME],
liberty_supported_apis)
def test_returns_apis_supported_in_havana_version(self):
mdinst = fake_InstanceMetadata(self.stubs, self.instance)
havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
base.VD_JSON_NAME], havana_supported_apis)
def test_returns_apis_supported_in_folsom_version(self):
mdinst = fake_InstanceMetadata(self.stubs, self.instance)
folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
folsom_supported_apis)
def test_returns_apis_supported_in_grizzly_version(self):
mdinst = fake_InstanceMetadata(self.stubs, self.instance)
grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
grizzly_supported_apis)
def test_metadata_json(self):
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
content = [
('/etc/my.conf', "content of my.conf"),
('/root/hello', "content of /root/hello"),
]
mdinst = fake_InstanceMetadata(self.stubs, inst,
content=content)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertEqual(mddict['uuid'], self.instance['uuid'])
self.assertIn('files', mddict)
self.assertIn('public_keys', mddict)
self.assertEqual(mddict['public_keys'][self.instance['key_name']],
self.instance['key_data'])
self.assertIn('launch_index', mddict)
self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
# verify that each of the things we put in content
# resulted in an entry in 'files', that their content
# there is as expected, and that /content lists them.
for (path, contents) in content:
fent = [f for f in mddict['files'] if f['path'] == path]
self.assertEqual(1, len(fent))
fent = fent[0]
found = mdinst.lookup("/openstack%s" % fent['content_path'])
self.assertEqual(found, contents)
def test_x509_keypair(self):
# check if the x509 content is set, if the keypair type is x509.
fakes.stub_out_key_pair_funcs(self.stubs, type='x509')
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = jsonutils.loads(mdjson)
# keypair is stubbed out, so its public_key is 'public_key'.
expected = {'name': self.instance['key_name'],
'type': 'x509',
'data': 'public_key'}
self.assertEqual([expected], mddict['keys'])
def test_extra_md(self):
# make sure extra_md makes it through to metadata
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
extra = {'foo': 'bar', 'mylist': [1, 2, 3],
'mydict': {"one": 1, "two": 2}}
mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = jsonutils.loads(mdjson)
for key, val in six.iteritems(extra):
self.assertEqual(mddict[key], val)
def test_password(self):
# make sure the password path is routed to the password handler
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
result = mdinst.lookup("/openstack/latest/password")
self.assertEqual(result, password.handle_password)
def test_userdata(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
self.assertEqual(USER_DATA_STRING, userdata_found)
# since we had user-data in this instance, it should be in listing
self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
inst.user_data = None
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
def test_random_seed(self):
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2013-04-04 has the 'random' field
mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertIn("random_seed", mddict)
self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
# verify that older versions do not have it
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertNotIn("random_seed", jsonutils.loads(mdjson))
def test_project_id(self):
fakes.stub_out_key_pair_funcs(self.stubs)
mdinst = fake_InstanceMetadata(self.stubs, self.instance)
# verify that 2015-10-15 has the 'project_id' field
mdjson = mdinst.lookup("/openstack/2015-10-15/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertIn("project_id", mddict)
self.assertEqual(mddict["project_id"], self.instance.project_id)
# verify that older versions do not have it
mdjson = mdinst.lookup("/openstack/2013-10-17/meta_data.json")
self.assertNotIn("project_id", jsonutils.loads(mdjson))
def test_no_dashes_in_metadata(self):
# top level entries in meta_data should not contain '-' in their name
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
mdjson = jsonutils.loads(
mdinst.lookup("/openstack/latest/meta_data.json"))
self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
def test_vendor_data_presence(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2013-10-17 has the vendor_data.json file
result = mdinst.lookup("/openstack/2013-10-17")
self.assertIn('vendor_data.json', result)
# verify that older versions do not have it
result = mdinst.lookup("/openstack/2013-04-04")
self.assertNotIn('vendor_data.json', result)
def test_vendor_data_response(self):
inst = self.instance.obj_clone()
mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
class myVdriver(base.VendorDataDriver):
def __init__(self, *args, **kwargs):
super(myVdriver, self).__init__(*args, **kwargs)
data = mydata.copy()
uuid = kwargs['instance']['uuid']
data.update({'inst_uuid': uuid})
self.data = data
def get(self):
return self.data
mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
# verify that 2013-10-17 has the vendor_data.json file
vdpath = "/openstack/2013-10-17/vendor_data.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
# the instance should be passed through, and our class copies the
# uuid through to 'inst_uuid'.
self.assertEqual(vd['inst_uuid'], inst['uuid'])
# check the other expected values
for k, v in mydata.items():
self.assertEqual(vd[k], v)
def test_network_data_presence(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2015-10-15 has the network_data.json file
result = mdinst.lookup("/openstack/2015-10-15")
self.assertIn('network_data.json', result)
# verify that older versions do not have it
result = mdinst.lookup("/openstack/2013-10-17")
self.assertNotIn('network_data.json', result)
def test_network_data_response(self):
inst = self.instance.obj_clone()
nw_data = {
"links": [{"ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
"id": "nic0", "type": "ethernet", "vif_id": 1,
"mtu": 1500}],
"networks": [{"id": "network0", "ip_address": "10.10.0.2",
"link": "nic0", "netmask": "255.255.255.0",
"network_id":
"00000000-0000-0000-0000-000000000000",
"routes": [], "type": "ipv4"}],
"services": [{'address': '1.2.3.4', 'type': 'dns'}]}
mdinst = fake_InstanceMetadata(self.stubs, inst,
network_metadata=nw_data)
# verify that 2015-10-15 has the network_data.json file
nwpath = "/openstack/2015-10-15/network_data.json"
nw = jsonutils.loads(mdinst.lookup(nwpath))
# check the other expected values
for k, v in nw_data.items():
self.assertEqual(nw[k], v)
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
def setUp(self):
super(MetadataHandlerTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
def test_callable(self):
def verify(req, meta_data):
self.assertIsInstance(meta_data, CallableMD)
return "foo"
class CallableMD(object):
def lookup(self, path_info):
return verify
response = fake_request(self.stubs, CallableMD(), "/bar")
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, "foo")
def test_root(self):
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self.stubs, self.mdinst, "/")
self.assertEqual(response.body, expected)
response = fake_request(self.stubs, self.mdinst, "/foo/../")
self.assertEqual(response.body, expected)
def test_root_metadata_proxy_enabled(self):
self.flags(service_metadata_proxy=True,
group='neutron')
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self.stubs, self.mdinst, "/")
self.assertEqual(response.body, expected)
response = fake_request(self.stubs, self.mdinst, "/foo/../")
self.assertEqual(response.body, expected)
def test_version_root(self):
response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body, 'meta-data/\nuser-data')
response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
self.assertEqual(response.status_int, 404)
def test_json_data(self):
fakes.stub_out_key_pair_funcs(self.stubs)
response = fake_request(self.stubs, self.mdinst,
"/openstack/latest/meta_data.json")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("application/json"))
response = fake_request(self.stubs, self.mdinst,
"/openstack/latest/vendor_data.json")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("application/json"))
def test_user_data_non_existing_fixed_address(self):
self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
return_non_existing_address)
response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
"127.1.1.1")
self.assertEqual(response.status_int, 404)
def test_fixed_address_none(self):
response = fake_request(None, self.mdinst,
relpath="/2009-04-04/user-data", address=None)
self.assertEqual(response.status_int, 500)
def test_invalid_path_is_404(self):
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data-invalid")
self.assertEqual(response.status_int, 404)
def test_user_data_with_use_forwarded_header(self):
expected_addr = "192.192.192.2"
def fake_get_metadata(address):
if address == expected_addr:
return self.mdinst
else:
raise Exception("Expected addr of %s, got %s" %
(expected_addr, address))
self.flags(use_forwarded_for=True)
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers={'X-Forwarded-For': expected_addr})
self.assertEqual(response.status_int, 200)
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
response = fake_request(self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers=None)
self.assertEqual(response.status_int, 500)
@mock.patch('nova.utils.constant_time_compare')
def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
mock_compare.side_effect = test.TestingException
req = webob.Request.blank('/')
hnd = handler.MetadataRequestHandler()
req.headers['X-Instance-ID'] = 'fake-inst'
req.headers['X-Instance-ID-Signature'] = 'fake-sig'
req.headers['X-Tenant-ID'] = 'fake-proj'
self.assertRaises(test.TestingException,
hnd._handle_instance_id_request, req)
self.assertEqual(1, mock_compare.call_count)
def _fake_x_get_metadata(self, instance_id, remote_address):
if remote_address is None:
raise Exception('Expected X-Forwarded-For header')
elif instance_id == self.expected_instance_id:
return self.mdinst
else:
# raise the exception to aid with 500 response code test
raise Exception("Expected instance_id of %s, got %s" %
(self.expected_instance_id, instance_id))
def test_user_data_with_neutron_instance_id(self):
self.expected_instance_id = 'a-b-c-d'
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
self.expected_instance_id,
hashlib.sha256).hexdigest()
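# The X-Instance-ID-Signature header carries the hex HMAC-SHA256 of the
# instance id keyed with neutron.metadata_proxy_shared_secret, which is what
# is computed above and what the metadata handler verifies.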
# try a request with service disabled
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
headers={'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
# now enable the service
self.flags(service_metadata_proxy=True,
group='neutron')
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
# mismatched signature
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': ''})
self.assertEqual(response.status_int, 403)
# missing X-Tenant-ID from request
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 400)
# mismatched X-Tenant-ID
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'FAKE',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 404)
# without X-Forwarded-For
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
# unexpected Instance-ID
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
'z-z-z-z',
hashlib.sha256).hexdigest()
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'z-z-z-z',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
def test_get_metadata(self):
def _test_metadata_path(relpath):
# recursively confirm a http 200 from all meta-data elements
# available at relpath.
response = fake_request(self.stubs, self.mdinst,
relpath=relpath)
for item in response.body.split('\n'):
if 'public-keys' in relpath:
# meta-data/public-keys/0=keyname refers to
# meta-data/public-keys/0
item = item.split('=')[0]
if item.endswith('/'):
path = relpath + '/' + item.rstrip('/')
_test_metadata_path(path)
continue
path = relpath + '/' + item
response = fake_request(self.stubs, self.mdinst, relpath=path)
self.assertEqual(response.status_int, 200, message=path)
_test_metadata_path('/2009-04-04/meta-data')
def _metadata_handler_with_instance_id(self, hnd):
expected_instance_id = 'a-b-c-d'
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
expected_instance_id,
hashlib.sha256).hexdigest()
self.flags(service_metadata_proxy=True, group='neutron')
response = fake_request(
None, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata=False,
app=hnd,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(200, response.status_int)
self.assertEqual(base64.b64decode(self.instance['user_data']),
response.body)
@mock.patch.object(base, 'get_metadata_by_instance_id')
def test_metadata_handler_with_instance_id(self, get_by_uuid):
# test twice to ensure that the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=15)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_instance_id(hnd)
self._metadata_handler_with_instance_id(hnd)
self.assertEqual(1, get_by_uuid.call_count)
@mock.patch.object(base, 'get_metadata_by_instance_id')
def test_metadata_handler_with_instance_id_no_cache(self, get_by_uuid):
# test twice to ensure that disabling the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=0)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_instance_id(hnd)
self._metadata_handler_with_instance_id(hnd)
self.assertEqual(2, get_by_uuid.call_count)
def _metadata_handler_with_remote_address(self, hnd):
response = fake_request(
None, self.mdinst,
fake_get_metadata=False,
app=hnd,
relpath="/2009-04-04/user-data",
address="192.192.192.2")
self.assertEqual(200, response.status_int)
self.assertEqual(base64.b64decode(self.instance.user_data),
response.body)
@mock.patch.object(base, 'get_metadata_by_address')
def test_metadata_handler_with_remote_address(self, get_by_uuid):
# test twice to ensure that the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=15)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_remote_address(hnd)
self._metadata_handler_with_remote_address(hnd)
self.assertEqual(1, get_by_uuid.call_count)
@mock.patch.object(base, 'get_metadata_by_address')
def test_metadata_handler_with_remote_address_no_cache(self, get_by_uuid):
# test twice to ensure that disabling the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=0)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_remote_address(hnd)
self._metadata_handler_with_remote_address(hnd)
self.assertEqual(2, get_by_uuid.call_count)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy(self, mock_get_client):
self.flags(service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_chain(self, mock_get_client):
self.flags(service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
def fake_list_ports(ctx, **kwargs):
if kwargs.get('fixed_ips') == 'ip_address=192.192.192.2':
return {
'ports': [{
'device_id': 'a-b-c-d',
'tenant_id': 'test'}]}
else:
return {'ports':
[]}
mock_client = mock_get_client()
mock_client.list_ports.side_effect = fake_list_ports
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="10.10.10.10",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2, 10.10.10.10',
'X-Metadata-Provider': proxy_lb_id})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_signed(self, mock_get_client):
shared_secret = "testing1234"
self.flags(
metadata_proxy_shared_secret=shared_secret,
service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
signature = hmac.new(
shared_secret,
proxy_lb_id,
hashlib.sha256).hexdigest()
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id,
'X-Metadata-Provider-Signature': signature})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_signed_fail(self, mock_get_client):
shared_secret = "testing1234"
bad_secret = "testing3468"
self.flags(
metadata_proxy_shared_secret=shared_secret,
service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
signature = hmac.new(
bad_secret,
proxy_lb_id,
hashlib.sha256).hexdigest()
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id,
'X-Metadata-Provider-Signature': signature})
self.assertEqual(403, response.status_int)
class MetadataPasswordTestCase(test.TestCase):
def setUp(self):
super(MetadataPasswordTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
self.flags(use_local=True, group='conductor')
def test_get_password(self):
request = webob.Request.blank('')
self.mdinst.password = 'foo'
result = password.handle_password(request, self.mdinst)
self.assertEqual(result, 'foo')
def test_bad_method(self):
request = webob.Request.blank('')
request.method = 'PUT'
self.assertRaises(webob.exc.HTTPBadRequest,
password.handle_password, request, self.mdinst)
@mock.patch('nova.objects.Instance.get_by_uuid')
def _try_set_password(self, get_by_uuid, val='bar'):
request = webob.Request.blank('')
request.method = 'POST'
request.body = val
get_by_uuid.return_value = self.instance
with mock.patch.object(self.instance, 'save') as save:
password.handle_password(request, self.mdinst)
save.assert_called_once_with()
self.assertIn('password_0', self.instance.system_metadata)
def test_set_password(self):
self.mdinst.password = ''
self._try_set_password()
def test_conflict(self):
self.mdinst.password = 'foo'
self.assertRaises(webob.exc.HTTPConflict,
self._try_set_password)
def test_too_large(self):
self.mdinst.password = ''
self.assertRaises(webob.exc.HTTPBadRequest,
self._try_set_password,
val=('a' * (password.MAX_SIZE + 1)))
|
|
#
# (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.http import request as django_request
import mock
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api.rest import neutron
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data.utils import TestData
TEST = TestData(neutron_data.data)
class NeutronNetworksTestCase(test.TestCase):
def setUp(self):
super(NeutronNetworksTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get_list_for_tenant(self, client):
request = self.mock_rest_request()
networks = self._networks
client.network_list_for_tenant.return_value = networks
response = neutron.Networks().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, TEST.api_networks.list())
client.network_list_for_tenant.assert_called_once_with(
request, request.user.tenant_id)
@mock.patch.object(neutron.api, 'neutron')
def test_create(self, client):
self._test_create(
'{"name": "mynetwork"}',
{'name': 'mynetwork'}
)
@mock.patch.object(neutron.api, 'neutron')
def test_create_with_bogus_param(self, client):
self._test_create(
'{"name": "mynetwork","bilbo":"baggins"}',
{'name': 'mynetwork'}
)
@mock.patch.object(neutron.api, 'neutron')
def _test_create(self, supplied_body, expected_call, client):
request = self.mock_rest_request(body=supplied_body)
client.network_create.return_value = self._networks[0]
response = neutron.Networks().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/neutron/networks/'
+ str(TEST.api_networks.first().get("id")))
self.assertEqual(response.json, TEST.api_networks.first())
#
# Services
#
@test.create_stubs({api.base: ('is_service_enabled',)})
@test.create_stubs({api.neutron: ('is_extension_supported',)})
@mock.patch.object(neutron.api, 'neutron')
def test_services_get(self, client):
params = django_request.QueryDict('network_id=the_network')
request = self.mock_rest_request(GET=params)
api.base.is_service_enabled(request, 'network').AndReturn(True)
api.neutron.is_extension_supported(request, 'agent').AndReturn(True)
client.agent_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}})
]
self.mox.ReplayAll()
response = neutron.Services().get(request)
self.assertStatusCode(response, 200)
client.agent_list.assert_called_once_with(
request, network_id='the_network')
self.assertEqual(response.content.decode('utf-8'),
'{"items": [{"id": "1"}, {"id": "2"}]}')
@test.create_stubs({api.base: ('is_service_enabled',)})
def test_services_get_disabled(self):
request = self.mock_rest_request(
GET={"network_id": self._networks[0].id})
api.base.is_service_enabled(request, 'network').AndReturn(False)
self.mox.ReplayAll()
response = neutron.Services().get(request)
self.assertStatusCode(response, 501)
class NeutronSubnetsTestCase(test.TestCase):
def setUp(self):
super(NeutronSubnetsTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
self._subnets = [test.mock_factory(n)
for n in TEST.api_subnets.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get(self, client):
params = django_request.QueryDict('network_id=%s' %
self._networks[0].id)
request = self.mock_rest_request(GET=params)
client.subnet_list.return_value = [self._subnets[0]]
response = neutron.Subnets().get(request)
self.assertStatusCode(response, 200)
client.subnet_list.assert_called_once_with(
request, network_id=TEST.api_networks.first().get("id"))
@mock.patch.object(neutron.api, 'neutron')
def test_create(self, client):
request = self.mock_rest_request(
body='{"network_id": "%s",'
' "ip_version": "4",'
' "cidr": "192.168.199.0/24"}' % self._networks[0].id)
client.subnet_create.return_value = self._subnets[0]
response = neutron.Subnets().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/neutron/subnets/' +
str(TEST.api_subnets.first().get("id")))
self.assertEqual(response.json, TEST.api_subnets.first())
class NeutronPortsTestCase(test.TestCase):
def setUp(self):
super(NeutronPortsTestCase, self).setUp()
self._networks = [test.mock_factory(n)
for n in TEST.api_networks.list()]
self._ports = [test.mock_factory(n)
for n in TEST.api_ports.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_get(self, client):
params = django_request.QueryDict('network_id=%s' %
self._networks[0].id)
request = self.mock_rest_request(GET=params)
client.port_list_with_trunk_types.return_value = [self._ports[0]]
response = neutron.Ports().get(request)
self.assertStatusCode(response, 200)
client.port_list_with_trunk_types.assert_called_once_with(
request, network_id=TEST.api_networks.first().get("id"))
class NeutronTrunkTestCase(test.TestCase):
@mock.patch.object(neutron.api, 'neutron')
def test_trunk_delete(self, client):
request = self.mock_rest_request()
neutron.Trunk().delete(request, 1)
client.trunk_delete.assert_called_once_with(request, 1)
@mock.patch.object(neutron.api, 'neutron')
def test_trunk_get(self, client):
trunk_id = TEST.api_trunks.first().get("id")
request = self.mock_rest_request(GET={"trunk_id": trunk_id})
client.trunk_show.return_value = self.trunks.first()
response = neutron.Trunk().get(request, trunk_id=trunk_id)
self.assertStatusCode(response, 200)
client.trunk_show.assert_called_once_with(
request, trunk_id)
class NeutronTrunksTestCase(test.TestCase):
@mock.patch.object(neutron.api, 'neutron')
def test_trunks_get(self, client):
request = self.mock_rest_request(GET=django_request.QueryDict())
client.trunk_list.return_value = self.trunks.list()
response = neutron.Trunks().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(
response,
[t.to_dict() for t in self.trunks.list()])
class NeutronExtensionsTestCase(test.TestCase):
def setUp(self):
super(NeutronExtensionsTestCase, self).setUp()
self._extensions = [n for n in TEST.api_extensions.list()]
@mock.patch.object(neutron.api, 'neutron')
def test_list_extensions(self, nc):
request = self.mock_rest_request(**{'GET': {}})
nc.list_extensions.return_value = self._extensions
response = neutron.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, TEST.api_extensions.list())
nc.list_extensions.assert_called_once_with(request)
class NeutronDefaultQuotasTestCase(test.TestCase):
@test.create_stubs({base: ('is_service_enabled',)})
@mock.patch.object(neutron.api, 'neutron')
def test_quotas_sets_defaults_get_when_service_is_enabled(self, client):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
base.is_service_enabled(request, 'network').AndReturn(True)
client.tenant_quota_get.return_value = [
base.Quota("network", 100),
base.Quota("q2", 101)]
self.mox.ReplayAll()
response = neutron.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 200)
self.assertItemsCollectionEqual(response, [
{'limit': 100, 'display_name': 'Networks', 'name': 'network'},
{'limit': 101, 'display_name': 'Q2', 'name': 'q2'}])
client.tenant_quota_get.assert_called_once_with(
request,
request.user.tenant_id)
@test.create_stubs({neutron.api.base: ('is_service_enabled',)})
@mock.patch.object(neutron.api, 'neutron')
def test_quota_sets_defaults_get_when_service_is_disabled(self, client):
filters = {'user': {'tenant_id': 'tenant'}}
request = self.mock_rest_request(**{'GET': dict(filters)})
base.is_service_enabled(request, 'network').AndReturn(False)
self.mox.ReplayAll()
response = neutron.DefaultQuotaSets().get(request)
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'),
'"Service Neutron is disabled."')
client.tenant_quota_get.assert_not_called()
class NeutronQuotaSetsTestCase(test.TestCase):
def setUp(self):
super(NeutronQuotaSetsTestCase, self).setUp()
quota_set = self.neutron_quotas.list()[0]
self._quota_data = {}
for quota in quota_set:
self._quota_data[quota.name] = quota.limit
@mock.patch.object(neutron, 'quotas')
@mock.patch.object(neutron.api, 'neutron')
@mock.patch.object(neutron.api, 'base')
def test_quotas_sets_patch(self, bc, nc, qc):
request = self.mock_rest_request(body='''
{"network": "5", "subnet": "5", "port": "50",
"router": "5", "floatingip": "50",
"security_group": "5", "security_group_rule": "50",
"volumes": "5", "cores": "50"}
''')
qc.get_disabled_quotas.return_value = []
qc.NEUTRON_QUOTA_FIELDS = {n for n in self._quota_data}
bc.is_service_enabled.return_value = True
nc.is_extension_supported.return_value = True
response = neutron.QuotasSets().patch(request, 'spam123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content.decode('utf-8'), '')
nc.tenant_quota_update.assert_called_once_with(
request, 'spam123', network='5',
subnet='5', port='50', router='5',
floatingip='50', security_group='5',
security_group_rule='50')
@mock.patch.object(neutron, 'quotas')
@mock.patch.object(neutron.api, 'neutron')
@mock.patch.object(neutron.api, 'base')
def test_quotas_sets_patch_when_service_is_disabled(self, bc, nc, qc):
request = self.mock_rest_request(body='''
{"network": "5", "subnet": "5", "port": "50",
"router": "5", "floatingip": "50",
"security_group": "5", "security_group_rule": "50",
"volumes": "5", "cores": "50"}
''')
qc.get_disabled_quotas.return_value = []
qc.NEUTRON_QUOTA_FIELDS = {n for n in self._quota_data}
bc.is_service_enabled.return_value = False
response = neutron.QuotasSets().patch(request, 'spam123')
message = \
'"Service Neutron is disabled or quotas extension not available."'
self.assertStatusCode(response, 501)
self.assertEqual(response.content.decode('utf-8'), message)
nc.tenant_quota_update.assert_not_called()
def mock_obj_to_dict(r):
return mock.Mock(**{'to_dict.return_value': r})
def mock_factory(r):
"""mocks all the attributes as well as the to_dict """
mocked = mock_obj_to_dict(r)
mocked.configure_mock(**r)
return mocked
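# Editorial usage sketch (not part of the original test module; the attribute
# names below are assumptions): mock_factory() returns a Mock whose attributes
# and to_dict() both reflect the supplied dict, e.g.
#   net = mock_factory({'id': 'n1', 'status': 'ACTIVE'})
#   net.id          # -> 'n1'
#   net.to_dict()   # -> {'id': 'n1', 'status': 'ACTIVE'}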
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
out = frappe.response
from frappe.utils import cint
import frappe.defaults
def get_sql_tables(q):
if q.find('WHERE') != -1:
tl = q.split('FROM')[1].split('WHERE')[0].split(',')
elif q.find('GROUP BY') != -1:
tl = q.split('FROM')[1].split('GROUP BY')[0].split(',')
else:
tl = q.split('FROM')[1].split('ORDER BY')[0].split(',')
return [t.strip().strip('`')[3:] for t in tl]
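# Editorial example (query text assumed): for
#   q = "SELECT name FROM `tabSales Invoice`, `tabSales Invoice Item` WHERE docstatus=1"
# the FROM clause is split on commas, backticks are stripped and the leading
# "tab" prefix dropped, so get_sql_tables(q) returns
#   ['Sales Invoice', 'Sales Invoice Item']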
def get_parent_dt(dt):
pdt = ''
if frappe.db.sql('select name from `tabDocType` where istable=1 and name=%s', dt):
import frappe.model.meta
return frappe.model.meta.get_parent_dt(dt)
return pdt
def get_sql_meta(tl):
std_columns = {
'owner':('Owner', '', '', '100'),
'creation':('Created on', 'Date', '', '100'),
'modified':('Last modified on', 'Date', '', '100'),
'modified_by':('Modified By', '', '', '100')
}
meta = {}
for dt in tl:
meta[dt] = std_columns.copy()
# for table doctype, the ID is the parent id
pdt = get_parent_dt(dt)
if pdt:
meta[dt]['parent'] = ('ID', 'Link', pdt, '200')
# get the field properties from DocField
res = frappe.db.sql("select fieldname, label, fieldtype, options, width \
from tabDocField where parent=%s", dt)
for r in res:
if r[0]:
meta[dt][r[0]] = (r[1], r[2], r[3], r[4]);
# name
meta[dt]['name'] = ('ID', 'Link', dt, '200')
return meta
def add_match_conditions(q, tl):
from frappe.desk.reportview import build_match_conditions
sl = []
for dt in tl:
s = build_match_conditions(dt)
if s:
sl.append(s)
# insert the conditions
if sl:
condition_st = q.find('WHERE')!=-1 and ' AND ' or ' WHERE '
condition_end = q.find('ORDER BY')!=-1 and 'ORDER BY' or 'LIMIT'
condition_end = q.find('GROUP BY')!=-1 and 'GROUP BY' or condition_end
if q.find('ORDER BY')!=-1 or q.find('LIMIT')!=-1 or q.find('GROUP BY')!=-1: # if query continues beyond conditions
q = q.split(condition_end)
q = q[0] + condition_st + '(' + ' OR '.join(sl) + ') ' + condition_end + q[1]
else:
q = q + condition_st + '(' + ' OR '.join(sl) + ')'
return q
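# Editorial sketch (the match condition below is hypothetical; the real string
# comes from build_match_conditions and depends on the user's permissions):
#   q  = "SELECT name FROM `tabNote` ORDER BY modified"
#   sl = ["`tabNote`.owner = 'user@example.com'"]
# add_match_conditions() splits the query at ORDER BY and rebuilds it as
#   SELECT name FROM `tabNote` WHERE (`tabNote`.owner = 'user@example.com') ORDER BY modified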
def guess_type(m):
"""
Returns fieldtype depending on the MySQLdb Description
"""
import MySQLdb
if m in MySQLdb.NUMBER:
return 'Currency'
elif m in MySQLdb.DATE:
return 'Date'
else:
return 'Data'
def build_description_simple():
colnames, coltypes, coloptions, colwidths = [], [], [], []
for m in frappe.db.get_description():
colnames.append(m[0])
coltypes.append(guess_type(m[1]))
coloptions.append('')
colwidths.append('100')
return colnames, coltypes, coloptions, colwidths
def build_description_standard(meta, tl):
desc = frappe.db.get_description()
colnames, coltypes, coloptions, colwidths = [], [], [], []
# merged metadata - used if we are unable to
# get both the table name and field name from
# the description - in case of joins
merged_meta = {}
for d in meta:
merged_meta.update(meta[d])
for f in desc:
fn, dt = f[0], ''
if '.' in fn:
dt, fn = fn.split('.')
if (not dt) and merged_meta.get(fn):
# no "AS" given, find type from merged description
desc = merged_meta[fn]
colnames.append(desc[0] or fn)
coltypes.append(desc[1] or '')
coloptions.append(desc[2] or '')
colwidths.append(desc[3] or '100')
elif meta.get(dt,{}).has_key(fn):
# type specified for a multi-table join
# usually from Report Builder
desc = meta[dt][fn]
colnames.append(desc[0] or fn)
coltypes.append(desc[1] or '')
coloptions.append(desc[2] or '')
colwidths.append(desc[3] or '100')
else:
# nothing found
# guess
colnames.append(fn)
coltypes.append(guess_type(f[1]))
coloptions.append('')
colwidths.append('100')
return colnames, coltypes, coloptions, colwidths
@frappe.whitelist()
def runquery(q='', ret=0, from_export=0):
import frappe.utils
formatted = cint(frappe.form_dict.get('formatted'))
# CASE A: Simple Query
# --------------------
if frappe.form_dict.get('simple_query') or frappe.form_dict.get('is_simple'):
if not q: q = frappe.form_dict.get('simple_query') or frappe.form_dict.get('query')
if q.split()[0].lower() != 'select':
raise Exception('Query must be a SELECT')
as_dict = cint(frappe.form_dict.get('as_dict'))
res = frappe.db.sql(q, as_dict = as_dict, as_list = not as_dict, formatted=formatted)
# build colnames etc from metadata
colnames, coltypes, coloptions, colwidths = [], [], [], []
# CASE B: Standard Query
# -----------------------
else:
if not q: q = frappe.form_dict.get('query')
tl = get_sql_tables(q)
meta = get_sql_meta(tl)
q = add_match_conditions(q, tl)
# replace special variables
q = q.replace('__user', frappe.session.user)
q = q.replace('__today', frappe.utils.nowdate())
res = frappe.db.sql(q, as_list=1, formatted=formatted)
colnames, coltypes, coloptions, colwidths = build_description_standard(meta, tl)
# run server script
# -----------------
style, header_html, footer_html, page_template = '', '', '', ''
out['colnames'] = colnames
out['coltypes'] = coltypes
out['coloptions'] = coloptions
out['colwidths'] = colwidths
out['header_html'] = header_html
out['footer_html'] = footer_html
out['page_template'] = page_template
if style:
out['style'] = style
# just the data - return
if ret==1:
return res
out['values'] = res
# return num of entries
qm = frappe.form_dict.get('query_max') or ''
if qm and qm.strip():
if qm.split()[0].lower() != 'select':
raise Exception('Query (Max) must be a SELECT')
if not frappe.form_dict.get('simple_query'):
qm = add_match_conditions(qm, tl)
out['n_values'] = frappe.utils.cint(frappe.db.sql(qm)[0][0])
@frappe.whitelist()
def runquery_csv():
global out
# run query
res = runquery(from_export = 1)
q = frappe.form_dict.get('query')
rep_name = frappe.form_dict.get('report_name')
if not frappe.form_dict.get('simple_query'):
# Report Name
if not rep_name:
rep_name = get_sql_tables(q)[0]
if not rep_name: rep_name = 'DataExport'
# Headings
heads = []
rows = [[rep_name], out['colnames']] + out['values']
from cStringIO import StringIO
import csv
f = StringIO()
writer = csv.writer(f)
for r in rows:
# encode only unicode type strings and not int, floats etc.
writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))
f.seek(0)
out['result'] = unicode(f.read(), 'utf-8')
out['type'] = 'csv'
out['doctype'] = rep_name
def add_limit_to_query(query, args):
"""
Add limit condition to query
can be used by methods called in listing to add limit condition
"""
if args.get('limit_page_length'):
query += """
limit %(limit_start)s, %(limit_page_length)s"""
import frappe.utils
args['limit_start'] = frappe.utils.cint(args.get('limit_start'))
args['limit_page_length'] = frappe.utils.cint(args.get('limit_page_length'))
return query, args
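# Editorial example (argument values assumed): calling
#   add_limit_to_query("select name from `tabNote`",
#                      {'limit_start': '20', 'limit_page_length': '10'})
# appends a "limit %(limit_start)s, %(limit_page_length)s" clause to the query
# and coerces both args to ints, ready to be passed to frappe.db.sql(query, args).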
|
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of11']
class instruction(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = instruction.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = instruction()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("instruction {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class apply_actions(instruction):
type = 4
def __init__(self, actions=None):
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = apply_actions()
_type = reader.read("!H")[0]
assert(_type == 4)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("apply_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
instruction.subtypes[4] = apply_actions
class clear_actions(instruction):
type = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = clear_actions()
_type = reader.read("!H")[0]
assert(_type == 5)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("clear_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction.subtypes[5] = clear_actions
class experimenter(instruction):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, data=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
instruction.subtypes[65535] = experimenter
class goto_table(instruction):
type = 1
def __init__(self, table_id=None):
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.table_id))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = goto_table()
_type = reader.read("!H")[0]
assert(_type == 1)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.table_id = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.table_id != other.table_id: return False
return True
def pretty_print(self, q):
q.text("goto_table {")
with q.group():
with q.indent(2):
q.breakable()
q.text("table_id = ");
q.text("%#x" % self.table_id)
q.breakable()
q.text('}')
instruction.subtypes[1] = goto_table
class write_actions(instruction):
type = 3
def __init__(self, actions=None):
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_actions()
_type = reader.read("!H")[0]
assert(_type == 3)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("write_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
instruction.subtypes[3] = write_actions
class write_metadata(instruction):
type = 2
def __init__(self, metadata=None, metadata_mask=None):
if metadata != None:
self.metadata = metadata
else:
self.metadata = 0
if metadata_mask != None:
self.metadata_mask = metadata_mask
else:
self.metadata_mask = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
packed.append(struct.pack("!Q", self.metadata))
packed.append(struct.pack("!Q", self.metadata_mask))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_metadata()
_type = reader.read("!H")[0]
assert(_type == 2)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
obj.metadata = reader.read("!Q")[0]
obj.metadata_mask = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.metadata != other.metadata: return False
if self.metadata_mask != other.metadata_mask: return False
return True
def pretty_print(self, q):
q.text("write_metadata {")
with q.group():
with q.indent(2):
q.breakable()
q.text("metadata = ");
q.text("%#x" % self.metadata)
q.text(","); q.breakable()
q.text("metadata_mask = ");
q.text("%#x" % self.metadata_mask)
q.breakable()
q.text('}')
instruction.subtypes[2] = write_metadata
|
|
#!/usr/bin/python3
from pyspark.mllib.linalg import DenseVector
from pyspark.sql import Row
from collections import namedtuple
import re, csv, os, shlex
import subprocess, shutil
# global vars
Point = namedtuple('Point', ['x', 'y'])
reg = r"[^a-zA-Z| 0-9 | \']"
reg_compiled = re.compile(reg)
gfs_output_path_hdfs = "gs://topic-zoomer/results/"
recFileFolder = "/tmp/Topic_Zoomer_recomputation"
"""
Check if step makes sense wrt dimension of the area
"""
def check_step(topLeft, bottomRight, step):
return min(step, topLeft.y-bottomRight.y, bottomRight.x - topLeft.x)
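# Editorial example: with topLeft=Point(0, 10), bottomRight=Point(10, 0) the
# selected area is 10x10, so check_step(topLeft, bottomRight, 25) returns 10,
# capping the step so the grid fits inside the area.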
"""
Get the old squares from the recomputation file
"""
def get_computed_squares():
result = []
recFileName = "/tmp/Topic_Zoomer_recomputation/recomputation.txt"
# remove previous recomputation results (if any)
shutil.rmtree(recFileFolder, ignore_errors=True)
if os.path.isfile(recFileName):
os.remove(recFileName)
copyRecFileCmd = 'hdfs dfs -copyToLocal {} /tmp'.format(recFileFolder)
copyRecFileRes = subprocess.call(shlex.split(copyRecFileCmd))
mergeFileCmd = 'cat {}/* > {}'.format(recFileFolder, recFileName)
mergeFileRes = subprocess.call(mergeFileCmd, shell=True)
if copyRecFileRes or mergeFileRes:
print('CopyRes: {}'.format(copyRecFileRes))
print('MergeRes: {}'.format(mergeFileRes))
print('Something went wrong while copying results')
return result
with open(recFileName, "r") as res:
csvRes = csv.reader(res)
for row in csvRes:
tl = Point(x=float(row[0]), y=float(row[1]))
br = Point(x=float(row[2]), y=float(row[3]))
result.append([tl, br])
return result
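# Editorial note: each CSV row of recomputation.txt is expected to hold four
# floats (tl.x, tl.y, br.x, br.y); e.g. the row "0.0,10.0,10.0,0.0" becomes
# [Point(x=0.0, y=10.0), Point(x=10.0, y=0.0)].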
"""
Get all the angles' coordinates from top-left
and bottom-right corner
"""
def get_square_points(tl,br):
bl = Point(tl.x, br.y)
tr = Point(br.x, tl.y)
return [tl, tr, bl, br]
"""
Check if two squares are equal
"""
def is_equal(inputTl, inputBr, computedSquares):
return inputTl.x == computedSquares[0].x and inputTl.y == computedSquares[0].y and \
inputBr.x == computedSquares[1].x and inputBr.y == computedSquares[1].y
"""
Compute the square difference between the input square and the
already computed ones
"""
def get_diff_squares(inputTl, inputBr, computedSquares):
oldSquares = []
output = []
inputSquare = get_square_points(inputTl, inputBr)
common = get_common_squares(inputTl, inputBr, computedSquares)
for s in computedSquares:
oldSquares.append(get_square_points(s[0], s[1]))
for oldS in oldSquares:
oldSTlBr = [oldS[0], oldS[3]]
for c in common:
if point_inside_square(inputBr.x, inputBr.y, oldSTlBr):
tlOut1 = inputTl
brOut1 = Point(inputBr.x, c[0].y)
tlOut2 = Point(inputTl.x, c[0].y)
brOut2 = Point(c[0].x, inputBr.y)
output.append([tlOut1, brOut1])
output.append([tlOut2, brOut2])
elif point_inside_square(inputTl.x, inputTl.y, oldSTlBr):
tlOut1 = Point(c[1].x, c[0].y)
brOut1 = Point(inputBr.x, c[1].y)
tlOut2 = Point(inputTl.x, c[1].y)
brOut2 = inputBr
output.append([tlOut1, brOut1])
output.append([tlOut2, brOut2])
elif point_inside_square(inputSquare[2].x, inputSquare[2].y, oldSTlBr):
tlOut1 = inputTl
brOut1 = Point(inputBr.x, c[0].y)
tlOut2 = Point(c[1].x, c[0].y)
brOut2 = inputBr
output.append([tlOut1, brOut1])
output.append([tlOut2, brOut2])
elif point_inside_square(inputSquare[1].x, inputSquare[1].y, oldSTlBr):
tlOut1 = inputTl
brOut1 = Point(c[0].x, c[1].y)
tlOut2 = Point(inputTl.x, c[1].y)
brOut2 = inputBr
output.append([tlOut1, brOut1])
output.append([tlOut2, brOut2])
else:
print("Something gone wrong in diff")
return output
"""
Get the common squares to avoid recomputation
"""
def get_common_squares(inputTl, inputBr, computedSquares):
output = []
oldSquares = []
inputSquare = get_square_points(inputTl, inputBr)
for s in computedSquares:
oldSquares.append(get_square_points(s[0], s[1]))
for oldS in oldSquares:
oldSTlBr = [oldS[0], oldS[3]]
if point_inside_square(inputBr.x, inputBr.y, oldSTlBr):
tlOut = oldS[0]
brOut = inputBr
output.append([tlOut, brOut])
elif point_inside_square(inputTl.x, inputTl.y, oldSTlBr):
tlOut = inputTl
brOut = oldS[3]
output.append([tlOut, brOut])
elif point_inside_square(inputSquare[2].x, inputSquare[2].y, oldSTlBr):
tlOut = Point(inputTl.x, oldS[0].y)
brOut = Point(oldS[3].x, inputBr.y)
output.append([tlOut, brOut])
elif point_inside_square(inputSquare[1].x, inputSquare[1].y, oldSTlBr):
tlOut = Point(oldS[0].x, inputTl.y)
brOut = Point(inputBr.x, oldS[3].y)
output.append([tlOut, brOut])
else:
print("Something gone wrong in common")
return output
"""
Returns the name of the topic from the vocabulary vector
"""
def topic_render(x, word_numbers, vocab_array):
# specify vector id of words to actual words
terms = x[0]
result = []
for i in range(word_numbers):
term = vocab_array[terms[i]]
result.append(term)
return result
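# Editorial example (data assumed): with vocab_array = ['spark', 'data', 'topic']
# and x = ([2, 0], [0.6, 0.4]) (a list of term indices followed by their weights),
# topic_render(x, 2, vocab_array) looks up the first two indices and returns
# ['topic', 'spark'].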
"""
Remove punctuation via regex
"""
def remove_punctuation(row):
return row[0], reg_compiled.sub('', row[1])
"""
Create document corpus as LDA input should be
"""
def create_corpus(x):
return [x[0], DenseVector(x[1].toArray())]
"""
Assign the point to a specific square in the grid
"""
def is_inside(row, topLeft, bottomRight, step, squares):
x = float(row[0])
y = float(row[1])
text = row[2]
idx = 0
if topLeft.x <= x and topLeft.y >= y and \
bottomRight.x >= x and bottomRight.y <= y:
# now I'm inside the selected area
# range among all the possible little squares
for s in squares:
if point_inside_square(x, y, s):
return (idx, text)
else:
idx += 1
def split_string_into_array(row):
return row[0], row[1].lower().strip().split(" ")
def remove_empty_array(array):
return array[0], array[1][0] != ''
def create_row(array):
id = array[0]
words = array[1]
return id, Row(idd=id, words=words)
"""
Determine if a point is inside a given square or not
"""
def point_inside_square(x, y, square):
topLeft = square[0]
bottomRight = square[1]
if topLeft.x <= x and topLeft.y >= y and \
bottomRight.x >= x and bottomRight.y <= y:
return True
else:
return False
"""
Create internal grid with size S*S
"""
def get_squares(topLeft, bottomRight, step):
# every little square is defined as topLeft and
# bottomRight angles (as namedtuples)
Point = namedtuple('Point', ['x', 'y'])
out = []
yMin = topLeft.y
yMax = topLeft.y - step
while yMax >= bottomRight.y:
xMin = topLeft.x
xMax = topLeft.x + step
while xMax <= bottomRight.x:
square = [Point(xMin, yMin), Point(xMax, yMax)]
out.append(square)
# update x boundaries
xMin = xMax
xMax += step
# update y boundaries
yMin = yMax
yMax -= step
return out
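# Editorial example: get_squares(Point(0, 2), Point(2, 0), 1) tiles the 2x2
# area into four unit squares, each stored as [topLeft, bottomRight]:
#   [Point(0, 2), Point(1, 1)], [Point(1, 2), Point(2, 1)],
#   [Point(0, 1), Point(1, 0)], [Point(1, 1), Point(2, 0)]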
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorComposition"]
@tf_export("linalg.LinearOperatorComposition")
@linear_operator.make_composite_tensor
class LinearOperatorComposition(linear_operator.LinearOperator):
"""Composes one or more `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` with action defined by:
```
op_composed(x) := op1(op2(...(opJ(x))...))
```
If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
[batch] matrix formed with the multiplication `A1 A2...AJ`.
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
`N_j = M_{j+1}`, in which case the composed operator has shape equal to
`broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
batch shapes broadcast. Even if the composed shape is well defined, the
composed operator's methods may fail due to lack of broadcasting ability in
the defining operators' methods.
```python
# Create a 2 x 2 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorComposition([operator_1, operator_2])
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 4 x 6 operators.
operator_46 = LinearOperatorComposition([operator_45, operator_56])
# Create a shape [2, 3, 6, 2] vector.
x = tf.random.normal(shape=[2, 3, 6, 2])
operator_46.matmul(x)
==> Shape [2, 3, 4, 2] Tensor
```
#### Performance
The performance of `LinearOperatorComposition` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorComposition`.
`LinearOperatorComposition` is initialized with a list of operators
`[op_1,...,op_J]`. For the `matmul` method to be well defined, the
composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have
similar constraints.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
parameters = dict(
operators=operators,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The composition of non-singular operators is always non-singular.")
is_non_singular = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = "_o_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorComposition, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents(graph_parents)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension.assert_is_compatible_with(operator.range_dimension)
domain_dimension = operator.domain_dimension
matrix_shape = tensor_shape.TensorShape(
[self.operators[0].range_dimension,
self.operators[-1].domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
# Don't check the matrix dimensions. That would add unnecessary Asserts to
# the graph. Things will fail at runtime naturally if shapes are
# incompatible.
matrix_shape = array_ops.stack([
self.operators[0].range_dimension_tensor(),
self.operators[-1].domain_dimension_tensor()
])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# If self.operators = [A, B], and not adjoint, then
# matmul_order_list = [B, A].
# As a result, we return A.matmul(B.matmul(x))
if adjoint:
matmul_order_list = self.operators
else:
matmul_order_list = list(reversed(self.operators))
result = matmul_order_list[0].matmul(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in matmul_order_list[1:]:
result = operator.matmul(result, adjoint=adjoint)
return result
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
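# Editorial note: the reductions in _determinant and _log_abs_determinant rely
# on det(A1 A2 ... AJ) = det(A1) * ... * det(AJ), hence
# log|det(A1 ... AJ)| = sum_j log|det(Aj)|.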
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# TODO(langmore) Implement solve using solve_ls if some intermediate
# operator maps to a high dimensional space.
# In that case, an exact solve may still be possible.
# If self.operators = [A, B], and not adjoint, then
# solve_order_list = [A, B].
# As a result, we return B.solve(A.solve(x))
if adjoint:
solve_order_list = list(reversed(self.operators))
else:
solve_order_list = self.operators
solution = solve_order_list[0].solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in solve_order_list[1:]:
solution = operator.solve(solution, adjoint=adjoint)
return solution
@property
def _composite_tensor_fields(self):
return ("operators",)
|
|
from .estimator_base import H2OEstimator
class H2ODeepLearningEstimator(H2OEstimator):
"""Build a supervised Deep Neural Network model
Builds a feed-forward multilayer artificial neural network on an H2OFrame
Parameters
----------
model_id : str, optional
The unique id assigned to the resulting model. If none is given, an id will
automatically be generated.
overwrite_with_best_model : bool
If True, overwrite the final model with the best model found during training.
Defaults to True.
checkpoint : H2ODeepLearningModel, optional
Model checkpoint (either key or H2ODeepLearningModel) to resume training with.
use_all_factor_levels : bool
Use all factor levels of categorical variables. Otherwise the first factor level
is omitted (without loss of accuracy). Useful for variable importances and
auto-enabled for autoencoder.
standardize : bool
If enabled, automatically standardize the data. If disabled, the user must
provide properly scaled input data.
activation : str
A string indicating the activation function to use.
Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout",
"Maxout", or "MaxoutWithDropout"
hidden : list
Hidden layer sizes (e.g. [100,100])
epochs : float
How many times the dataset should be iterated (streamed), can be fractional
train_samples_per_iteration : int
Number of training samples (globally) per MapReduce iteration.
Special values are: 0 one epoch; -1 all available data
(e.g., replicated training data); or -2 auto-tuning (default)
seed : int
Seed for random numbers (affects sampling) - Note: only reproducible when
running single threaded
adaptive_rate : bool
Adaptive learning rate (ADADELTA)
rho : float
Adaptive learning rate time decay factor (similarity to prior updates)
epsilon : float
Adaptive learning rate parameter, similar to learn rate annealing during initial
training phase. Typical values are between 1.0e-10 and 1.0e-4
rate : float
Learning rate (higher => less stable, lower => slower convergence)
rate_annealing : float
Learning rate annealing: rate / (1 + rate_annealing * samples)
rate_decay : float
Learning rate decay factor between layers (N-th layer: rate * rate_decay^(N-1))
momentum_start : float
Initial momentum at the beginning of training (try 0.5)
momentum_ramp : float
Number of training samples for which momentum increases
momentum_stable : float
Final momentum after the ramp is over (try 0.99)
nesterov_accelerated_gradient : bool
Logical. Use Nesterov accelerated gradient (recommended)
input_dropout_ratio : float
A fraction of the features for each training row to be omitted from training in
order to improve generalization (dimension sampling).
hidden_dropout_ratios : float
Hidden layer dropout ratios (can improve generalization); specify one value per
hidden layer, defaults to 0.5
l1 : float
L1 regularization (can add stability and improve generalization,
causes many weights to become 0)
l2 : float
L2 regularization (can add stability and improve generalization,
causes many weights to be small)
max_w2 : float
Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
initial_weight_distribution : str
Can be "Uniform", "UniformAdaptive", or "Normal"
initial_weight_scale : str
Uniform: -value ... value, Normal: stddev
loss : str
Loss function: "Automatic", "CrossEntropy" (for classification only),
"Quadratic", "Absolute" (experimental) or "Huber" (experimental)
distribution : str
A character string. The distribution function of the response.
Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma",
"tweedie", "laplace", "huber", "quantile" or "gaussian"
quantile_alpha : float
Quantile (only for Quantile regression, must be between 0 and 1)
tweedie_power : float
Tweedie power (only for Tweedie distribution, must be between 1 and 2)
score_interval : int
Shortest time interval (in secs) between model scoring
score_training_samples : int
Number of training set samples for scoring (0 for all)
score_validation_samples : int
Number of validation set samples for scoring (0 for all)
score_duty_cycle : float
Maximum duty cycle fraction for scoring (lower: more training, higher: more
scoring)
classification_stop : float
Stopping criterion for classification error fraction on training data
(-1 to disable)
regression_stop : float
Stopping criterion for regression error (MSE) on training data (-1 to disable)
stopping_rounds : int
Early stopping based on convergence of stopping_metric.
Stop if simple moving average of length k of the stopping_metric does not
improve (by stopping_tolerance) for k=stopping_rounds scoring events.
Can only trigger after at least 2k scoring events. Use 0 to disable.
stopping_metric : str
Metric to use for convergence checking, only used when stopping_rounds > 0
Can be one of "AUTO", "deviance", "logloss", "MSE", "AUC", "r2",
"misclassification".
stopping_tolerance : float
Relative tolerance for metric-based stopping criterion (stop if relative
improvement is not at least this much)
quiet_mode : bool
Enable quiet mode for less output to standard output
max_confusion_matrix_size : int
Max. size (number of classes) for confusion matrices to be shown
max_hit_ratio_k : float
Max number (top K) of predictions to use for hit ratio computation
(for multi-class only, 0 to disable)
balance_classes : bool
Balance training data class counts via over/under-sampling (for imbalanced data)
class_sampling_factors : list
Desired over/under-sampling ratios per class (in lexicographic order).
If not specified, sampling factors will be automatically computed to obtain
class balance during training. Requires balance_classes.
max_after_balance_size : float
Maximum relative size of the training data after balancing class counts
(can be less than 1.0)
score_validation_sampling :
Method used to sample validation dataset for scoring
diagnostics : bool
Enable diagnostics for hidden layers
variable_importances : bool
Compute variable importances for input features (Gedeon method); can be slow
for large networks
fast_mode : bool
Enable fast mode (minor approximations in back-propagation)
ignore_const_cols : bool
Ignore constant columns (no information can be gained anyway)
force_load_balance : bool
Force extra load balancing to increase training speed for small datasets
(to keep all cores busy)
replicate_training_data : bool
Replicate the entire training dataset onto every node for faster training
single_node_mode : bool
Run on a single node for fine-tuning of model parameters
shuffle_training_data : bool
Enable shuffling of training data (recommended if training data is replicated
and train_samples_per_iteration is close to numRows*numNodes)
sparse : bool
Sparse data handling (Experimental)
col_major : bool
Use a column major weight matrix for input layer. Can speed up forward
propagation, but might slow down back propagation (Experimental)
average_activation : float
Average activation for sparse auto-encoder (Experimental)
sparsity_beta : bool
Sparsity regularization (Experimental)
max_categorical_features : int
Max. number of categorical features, enforced via hashing (Experimental)
reproducible : bool
Force reproducibility on small data (will be slow - only uses 1 thread)
missing_values_handling : str
Handling of missing values. Either "Skip" or "MeanImputation".
export_weights_and_biases : bool
Whether to export Neural Network weights and biases to H2O Frames
nfolds : int, optional
Number of folds for cross-validation. If nfolds >= 2, then validation must
remain empty.
fold_assignment : str
Cross-validation fold assignment scheme, if fold_column is not specified
Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions : bool
Whether to keep the predictions of the cross-validation models
keep_cross_validation_fold_assignment : bool
Whether to keep the cross-validation fold assignment.
Examples
--------
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
>>> ml.init()
>>> rows=[[1,2,3,4,0],[2,1,2,4,1],[2,1,4,2,1],[0,1,2,34,1],[2,3,4,1,0]]*50
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2ODeepLearningEstimator()
>>> model.train(x=range(4), y=4, training_frame=fr)
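A further hedged sketch (illustrative, untuned values) combining the
regularization and early stopping parameters documented above on the same frame:
>>> model2 = H2ODeepLearningEstimator(hidden=[20,20], epochs=10, l1=1e-5, stopping_rounds=3, stopping_metric="logloss", stopping_tolerance=1e-3)
>>> model2.train(x=range(4), y=4, training_frame=fr)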
"""
def __init__(self, model_id=None, overwrite_with_best_model=None, checkpoint=None,
pretrained_autoencoder=None, use_all_factor_levels=None,
standardize=None, activation=None, hidden=None, epochs=None,
train_samples_per_iteration=None, seed=None, adaptive_rate=None,
rho=None, epsilon=None, rate=None, rate_annealing=None, rate_decay=None,
momentum_start=None, momentum_ramp=None, momentum_stable=None,
nesterov_accelerated_gradient=None, input_dropout_ratio=None,
hidden_dropout_ratios=None, l1=None, l2=None, max_w2=None,
initial_weight_distribution=None, initial_weight_scale=None, loss=None,
distribution=None, quantile_alpha=None, tweedie_power=None,
score_interval=None, score_training_samples=None,
score_validation_samples=None, score_duty_cycle=None,
classification_stop=None, regression_stop=None, quiet_mode=None,
max_confusion_matrix_size=None, max_hit_ratio_k=None, balance_classes=None,
class_sampling_factors=None, max_after_balance_size=None,
score_validation_sampling=None, diagnostics=None,
variable_importances=None, fast_mode=None, ignore_const_cols=None,
force_load_balance=None, replicate_training_data=None,
single_node_mode=None, shuffle_training_data=None, sparse=None,
col_major=None, average_activation=None, sparsity_beta=None,
max_categorical_features=None, missing_values_handling=None,
reproducible=None, export_weights_and_biases=None, nfolds=None,
fold_assignment=None, keep_cross_validation_predictions=None,
keep_cross_validation_fold_assignment=None,
stopping_rounds=None, stopping_metric=None, stopping_tolerance=None,
initial_weights=None, initial_biases=None):
super(H2ODeepLearningEstimator, self).__init__()
self._parms = locals()
self._parms = {k:v for k,v in self._parms.items() if k!="self"}
self._parms["autoencoder"] = isinstance(self, H2OAutoEncoderEstimator)
@property
def overwrite_with_best_model(self):
return self._parms["overwrite_with_best_model"]
@overwrite_with_best_model.setter
def overwrite_with_best_model(self, value):
self._parms["overwrite_with_best_model"] = value
@property
def checkpoint(self):
return self._parms["checkpoint"]
@checkpoint.setter
def checkpoint(self, value):
self._parms["checkpoint"] = value
@property
def pretrained_autoencoder(self):
return self._parms["pretrained_autoencoder"]
@pretrained_autoencoder.setter
def pretrained_autoencoder(self, value):
self._parms["pretrained_autoencoder"] = value
@property
def use_all_factor_levels(self):
return self._parms["use_all_factor_levels"]
@use_all_factor_levels.setter
def use_all_factor_levels(self, value):
self._parms["use_all_factor_levels"] = value
@property
def standardize(self):
return self._parms["standardize"]
@standardize.setter
def standardize(self, value):
self._parms["standardize"] = value
@property
def activation(self):
return self._parms["activation"]
@activation.setter
def activation(self, value):
self._parms["activation"] = value
@property
def hidden(self):
return self._parms["hidden"]
@hidden.setter
def hidden(self, value):
self._parms["hidden"] = value
@property
def epochs(self):
return self._parms["epochs"]
@epochs.setter
def epochs(self, value):
self._parms["epochs"] = value
@property
def train_samples_per_iteration(self):
return self._parms["train_samples_per_iteration"]
@train_samples_per_iteration.setter
def train_samples_per_iteration(self, value):
self._parms["train_samples_per_iteration"] = value
@property
def seed(self):
return self._parms["seed"]
@seed.setter
def seed(self, value):
self._parms["seed"] = value
@property
def adaptive_rate(self):
return self._parms["adaptive_rate"]
@adaptive_rate.setter
def adaptive_rate(self, value):
self._parms["adaptive_rate"] = value
@property
def rho(self):
return self._parms["rho"]
@rho.setter
def rho(self, value):
self._parms["rho"] = value
@property
def epsilon(self):
return self._parms["epsilon"]
@epsilon.setter
def epsilon(self, value):
self._parms["epsilon"] = value
@property
def rate(self):
return self._parms["rate"]
@rate.setter
def rate(self, value):
self._parms["rate"] = value
@property
def rate_annealing(self):
return self._parms["rate_annealing"]
@rate_annealing.setter
def rate_annealing(self, value):
self._parms["rate_annealing"] = value
@property
def rate_decay(self):
return self._parms["rate_decay"]
@rate_decay.setter
def rate_decay(self, value):
self._parms["rate_decay"] = value
@property
def momentum_start(self):
return self._parms["momentum_start"]
@momentum_start.setter
def momentum_start(self, value):
self._parms["momentum_start"] = value
@property
def momentum_ramp(self):
return self._parms["momentum_ramp"]
@momentum_ramp.setter
def momentum_ramp(self, value):
self._parms["momentum_ramp"] = value
@property
def momentum_stable(self):
return self._parms["momentum_stable"]
@momentum_stable.setter
def momentum_stable(self, value):
self._parms["momentum_stable"] = value
@property
def nesterov_accelerated_gradient(self):
return self._parms["nesterov_accelerated_gradient"]
@nesterov_accelerated_gradient.setter
def nesterov_accelerated_gradient(self, value):
self._parms["nesterov_accelerated_gradient"] = value
@property
def input_dropout_ratio(self):
return self._parms["input_dropout_ratio"]
@input_dropout_ratio.setter
def input_dropout_ratio(self, value):
self._parms["input_dropout_ratio"] = value
@property
def hidden_dropout_ratios(self):
return self._parms["hidden_dropout_ratios"]
@hidden_dropout_ratios.setter
def hidden_dropout_ratios(self, value):
self._parms["hidden_dropout_ratios"] = value
@property
def l1(self):
return self._parms["l1"]
@l1.setter
def l1(self, value):
self._parms["l1"] = value
@property
def l2(self):
return self._parms["l2"]
@l2.setter
def l2(self, value):
self._parms["l2"] = value
@property
def max_w2(self):
return self._parms["max_w2"]
@max_w2.setter
def max_w2(self, value):
self._parms["max_w2"] = value
@property
def initial_weight_distribution(self):
return self._parms["initial_weight_distribution"]
@initial_weight_distribution.setter
def initial_weight_distribution(self, value):
self._parms["initial_weight_distribution"] = value
@property
def initial_weight_scale(self):
return self._parms["initial_weight_scale"]
@initial_weight_scale.setter
def initial_weight_scale(self, value):
self._parms["initial_weight_scale"] = value
@property
def loss(self):
return self._parms["loss"]
@loss.setter
def loss(self, value):
self._parms["loss"] = value
@property
def distribution(self):
return self._parms["distribution"]
@distribution.setter
def distribution(self, value):
self._parms["distribution"] = value
@property
def quantile_alpha(self):
return self._parms["quantile_alpha"]
@quantile_alpha.setter
def quantile_alpha(self, value):
self._parms["quantile_alpha"] = value
@property
def tweedie_power(self):
return self._parms["tweedie_power"]
@tweedie_power.setter
def tweedie_power(self, value):
self._parms["tweedie_power"] = value
@property
def score_interval(self):
return self._parms["score_interval"]
@score_interval.setter
def score_interval(self, value):
self._parms["score_interval"] = value
@property
def score_training_samples(self):
return self._parms["score_training_samples"]
@score_training_samples.setter
def score_training_samples(self, value):
self._parms["score_training_samples"] = value
@property
def score_validation_samples(self):
return self._parms["score_validation_samples"]
@score_validation_samples.setter
def score_validation_samples(self, value):
self._parms["score_validation_samples"] = value
@property
def score_duty_cycle(self):
return self._parms["score_duty_cycle"]
@score_duty_cycle.setter
def score_duty_cycle(self, value):
self._parms["score_duty_cycle"] = value
@property
def classification_stop(self):
return self._parms["classification_stop"]
@classification_stop.setter
def classification_stop(self, value):
self._parms["classification_stop"] = value
@property
def regression_stop(self):
return self._parms["regression_stop"]
@regression_stop.setter
def regression_stop(self, value):
self._parms["regression_stop"] = value
@property
def stopping_rounds(self):
return self._parms["stopping_rounds"]
@stopping_rounds.setter
def stopping_rounds(self, value):
self._parms["stopping_rounds"] = value
@property
def stopping_metric(self):
return self._parms["stopping_metric"]
@stopping_metric.setter
def stopping_metric(self, value):
self._parms["stopping_metric"] = value
@property
def stopping_tolerance(self):
return self._parms["stopping_tolerance"]
@stopping_tolerance.setter
def stopping_tolerance(self, value):
self._parms["stopping_tolerance"] = value
@property
def quiet_mode(self):
return self._parms["quiet_mode"]
@quiet_mode.setter
def quiet_mode(self, value):
self._parms["quiet_mode"] = value
@property
def max_confusion_matrix_size(self):
return self._parms["max_confusion_matrix_size"]
@max_confusion_matrix_size.setter
def max_confusion_matrix_size(self, value):
self._parms["max_confusion_matrix_size"] = value
@property
def max_hit_ratio_k(self):
return self._parms["max_hit_ratio_k"]
@max_hit_ratio_k.setter
def max_hit_ratio_k(self, value):
self._parms["max_hit_ratio_k"] = value
@property
def balance_classes(self):
return self._parms["balance_classes"]
@balance_classes.setter
def balance_classes(self, value):
self._parms["balance_classes"] = value
@property
def class_sampling_factors(self):
return self._parms["class_sampling_factors"]
@class_sampling_factors.setter
def class_sampling_factors(self, value):
self._parms["class_sampling_factors"] = value
@property
def max_after_balance_size(self):
return self._parms["max_after_balance_size"]
@max_after_balance_size.setter
def max_after_balance_size(self, value):
self._parms["max_after_balance_size"] = value
@property
def score_validation_sampling(self):
return self._parms["score_validation_sampling"]
@score_validation_sampling.setter
def score_validation_sampling(self, value):
self._parms["score_validation_sampling"] = value
@property
def diagnostics(self):
return self._parms["diagnostics"]
@diagnostics.setter
def diagnostics(self, value):
self._parms["diagnostics"] = value
@property
def variable_importances(self):
return self._parms["variable_importances"]
@variable_importances.setter
def variable_importances(self, value):
self._parms["variable_importances"] = value
@property
def fast_mode(self):
return self._parms["fast_mode"]
@fast_mode.setter
def fast_mode(self, value):
self._parms["fast_mode"] = value
@property
def ignore_const_cols(self):
return self._parms["ignore_const_cols"]
@ignore_const_cols.setter
def ignore_const_cols(self, value):
self._parms["ignore_const_cols"] = value
@property
def force_load_balance(self):
return self._parms["force_load_balance"]
@force_load_balance.setter
def force_load_balance(self, value):
self._parms["force_load_balance"] = value
@property
def replicate_training_data(self):
return self._parms["replicate_training_data"]
@replicate_training_data.setter
def replicate_training_data(self, value):
self._parms["replicate_training_data"] = value
@property
def single_node_mode(self):
return self._parms["single_node_mode"]
@single_node_mode.setter
def single_node_mode(self, value):
self._parms["single_node_mode"] = value
@property
def shuffle_training_data(self):
return self._parms["shuffle_training_data"]
@shuffle_training_data.setter
def shuffle_training_data(self, value):
self._parms["shuffle_training_data"] = value
@property
def sparse(self):
return self._parms["sparse"]
@sparse.setter
def sparse(self, value):
self._parms["sparse"] = value
@property
def col_major(self):
return self._parms["col_major"]
@col_major.setter
def col_major(self, value):
self._parms["col_major"] = value
@property
def average_activation(self):
return self._parms["average_activation"]
@average_activation.setter
def average_activation(self, value):
self._parms["average_activation"] = value
@property
def sparsity_beta(self):
return self._parms["sparsity_beta"]
@sparsity_beta.setter
def sparsity_beta(self, value):
self._parms["sparsity_beta"] = value
@property
def max_categorical_features(self):
return self._parms["max_categorical_features"]
@max_categorical_features.setter
def max_categorical_features(self, value):
self._parms["max_categorical_features"] = value
@property
def missing_values_handling(self):
return self._parms["missing_values_handling"]
@missing_values_handling.setter
def missing_values_handling(self, value):
self._parms["missing_values_handling"] = value
@property
def reproducible(self):
return self._parms["reproducible"]
@reproducible.setter
def reproducible(self, value):
self._parms["reproducible"] = value
@property
def export_weights_and_biases(self):
return self._parms["export_weights_and_biases"]
@export_weights_and_biases.setter
def export_weights_and_biases(self, value):
self._parms["export_weights_and_biases"] = value
@property
def nfolds(self):
return self._parms["nfolds"]
@nfolds.setter
def nfolds(self, value):
self._parms["nfolds"] = value
@property
def fold_assignment(self):
return self._parms["fold_assignment"]
@fold_assignment.setter
def fold_assignment(self, value):
self._parms["fold_assignment"] = value
@property
def keep_cross_validation_predictions(self):
return self._parms["keep_cross_validation_predictions"]
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, value):
self._parms["keep_cross_validation_predictions"] = value
@property
def keep_cross_validation_fold_assignment(self):
return self._parms["keep_cross_validation_fold_assignment"]
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, value):
self._parms["keep_cross_validation_fold_assignment"] = value
@property
def initial_weights(self):
return self._parms["initial_weights"]
@initial_weights.setter
def initial_weights(self, value):
self._parms["initial_weights"] = value
@property
def initial_biases(self):
return self._parms["initial_biases"]
@initial_biases.setter
def initial_biases(self, value):
self._parms["initial_biases"] = value
class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
"""
Examples
--------
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
>>> ml.init()
>>> rows=[[1,2,3,4,0]*50,[2,1,2,4,1]*50,[2,1,4,2,1]*50,[0,1,2,34,1]*50,[2,3,4,1,0]*50]
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2OAutoEncoderEstimator()
>>> model.train(x=range(4), training_frame=fr)
"""
pass
|
|
#!/usr/bin/python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Loops Custom Tabs tests and outputs the results into a CSV file."""
import collections
import contextlib
import logging
import optparse
import os
import random
import re
import sys
import time
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.perf import cache_control
from devil.android.sdk import intent
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
import chrome_setup
# Local build of Chrome (not Chromium).
_CHROME_PACKAGE = 'com.google.android.apps.chrome'
_COMMAND_LINE_FILE = 'chrome-command-line'
_TEST_APP_PACKAGE_NAME = 'org.chromium.customtabs.test'
_INVALID_VALUE = -1
def RunOnce(device, url, speculated_url, parallel_url, warmup,
skip_launcher_activity, speculation_mode, delay_to_may_launch_url,
delay_to_launch_url, cold, pinning_benchmark, pin_filename,
pin_offset, pin_length, extra_brief_memory_mb, chrome_args,
reset_chrome_state):
"""Runs a test on a device once.
Args:
device: (DeviceUtils) device to run the tests on.
url: (str) URL to load. End of the redirect chain when using a
parallel request.
speculated_url: (str) Speculated URL.
parallel_url: (str) URL to load in parallel, typically
the start of the redirect chain.
warmup: (bool) Whether to call warmup.
skip_launcher_activity: (bool) Whether to skip the launcher activity.
speculation_mode: (str) Speculation Mode.
delay_to_may_launch_url: (int) Delay to mayLaunchUrl() in ms.
delay_to_launch_url: (int) Delay to launchUrl() in ms.
cold: (bool) Whether the page cache should be dropped.
pinning_benchmark: (bool) Whether to perform the 'pinning benchmark'.
pin_filename: (str) The file to pin on the device.
pin_offset: (int) Start offset of the range to pin.
pin_length: (int) Number of bytes to pin.
extra_brief_memory_mb: (int) Number of MiB to consume before starting
Chrome. Applies only to the 'pinning benchmark' scenario.
chrome_args: ([str]) List of arguments to pass to Chrome.
reset_chrome_state: (bool) Whether to reset the Chrome local state before
the run.
Returns:
The output line (str), like this (one line only):
<warmup>,<prerender_mode>,<delay_to_may_launch_url>,<delay_to_launch>,
<intent_sent_ms>,<page_load_started_ms>,<page_load_finished_ms>,
<first_contentful_paint>
or None on error.
"""
if not device.HasRoot():
device.EnableRoot()
timeout_s = 64
logcat_timeout = int(timeout_s + delay_to_may_launch_url / 1000.
+ delay_to_launch_url / 1000.)
with flag_changer.CustomCommandLineFlags(
device, _COMMAND_LINE_FILE, chrome_args):
launch_intent = intent.Intent(
action='android.intent.action.MAIN',
package=_TEST_APP_PACKAGE_NAME,
activity='org.chromium.customtabs.test.MainActivity',
extras={'url': str(url),
'speculated_url': str(speculated_url),
'parallel_url': str(parallel_url),
'warmup': warmup,
'skip_launcher_activity': skip_launcher_activity,
'speculation_mode': str(speculation_mode),
'delay_to_may_launch_url': delay_to_may_launch_url,
'delay_to_launch_url': delay_to_launch_url,
'pinning_benchmark': pinning_benchmark,
'pin_filename': str(pin_filename),
'pin_offset': pin_offset,
'pin_length': pin_length,
'extra_brief_memory_mb': extra_brief_memory_mb,
'timeout': timeout_s})
result_line_re = re.compile(r'CUSTOMTABSBENCHCSV.*: (.*)')
logcat_monitor = device.GetLogcatMonitor(clear=True)
logcat_monitor.Start()
device.ForceStop(_CHROME_PACKAGE)
device.ForceStop(_TEST_APP_PACKAGE_NAME)
if reset_chrome_state:
chrome_setup.ResetChromeLocalState(device, _CHROME_PACKAGE)
if cold:
cache_control.CacheControl(device).DropRamCaches()
device.StartActivity(launch_intent, blocking=True)
match = None
try:
match = logcat_monitor.WaitFor(result_line_re, timeout=logcat_timeout)
except device_errors.CommandTimeoutError as _:
logging.warning('Timeout waiting for the result line')
logcat_monitor.Stop()
logcat_monitor.Close()
return match.group(1) if match is not None else None
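# Hedged single-run sketch (values are illustrative; LoopOnDevice() below is
# the normal driver for this function):
# device = device_utils.DeviceUtils.HealthyDevices()[0]
# line = RunOnce(device, 'https://www.android.com', 'https://www.android.com',
# '', warmup=True, skip_launcher_activity=False,
# speculation_mode='prerender', delay_to_may_launch_url=1000,
# delay_to_launch_url=-1, cold=False, pinning_benchmark=False,
# pin_filename='', pin_offset=-1, pin_length=-1,
# extra_brief_memory_mb=0, chrome_args=chrome_setup.CHROME_ARGS,
# reset_chrome_state=True)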
RESULT_FIELDS = ('warmup', 'skip_launcher_activity', 'speculation_mode',
'delay_to_may_launch_url', 'delay_to_launch_url', 'commit',
'plt', 'first_contentful_paint')
Result = collections.namedtuple('Result', RESULT_FIELDS)
def ParseResult(result_line):
"""Parses a result line, and returns it.
Args:
result_line: (str) A result line, as returned by RunOnce().
Returns:
An instance of Result.
"""
tokens = result_line.strip().split(',')
assert len(tokens) == 9
intent_sent_timestamp = int(tokens[5])
return Result(int(tokens[0]), int(tokens[1]), tokens[2], int(tokens[3]),
int(tokens[4]),
max(_INVALID_VALUE, int(tokens[6]) - intent_sent_timestamp),
max(_INVALID_VALUE, int(tokens[7]) - intent_sent_timestamp),
max(_INVALID_VALUE, int(tokens[8]) - intent_sent_timestamp))
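# Hedged illustration of the mapping above: for a raw line
# '1,0,prerender,1000,-1,100,150,400,650' the intent-sent timestamp is 100, so
# commit, plt and first_contentful_paint become 50, 300 and 550 ms relative to
# the intent being sent.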
def LoopOnDevice(device, configs, output_filename, once=False,
should_stop=None):
"""Loops the tests on a device.
Args:
device: (DeviceUtils) device to run the tests on.
configs: ([dict])
output_filename: (str) Output filename. '-' for stdout.
once: (bool) Run only once.
should_stop: (threading.Event or None) When the event is set, stop looping.
"""
to_stdout = output_filename == '-'
out = sys.stdout if to_stdout else open(output_filename, 'a')
try:
while should_stop is None or not should_stop.is_set():
config = configs[random.randint(0, len(configs) - 1)]
chrome_args = chrome_setup.CHROME_ARGS
if config['speculation_mode'] == 'no_state_prefetch':
# NoStatePrefetch is enabled through an experiment.
chrome_args.extend([
'--force-fieldtrials=trial/group',
'--force-fieldtrial-params=trial.group:mode/no_state_prefetch',
'--enable-features=NoStatePrefetch<trial'])
elif config['speculation_mode'] == 'speculative_prefetch':
# Speculative Prefetch is enabled through an experiment.
chrome_args.extend([
'--force-fieldtrials=trial/group',
'--force-fieldtrial-params=trial.group:mode/external-prefetching',
'--enable-features=SpeculativeResourcePrefetching<trial'])
result = RunOnce(device,
config['url'],
config.get('speculated_url', config['url']),
config.get('parallel_url', ''),
config['warmup'], config['skip_launcher_activity'],
config['speculation_mode'],
config['delay_to_may_launch_url'],
config['delay_to_launch_url'], config['cold'],
config.get('pinning_benchmark', False),
config.get('pin_filename', ''),
config.get('pin_offset', -1),
config.get('pin_length', -1),
config.get('extra_brief_memory_mb', 0),
chrome_args, reset_chrome_state=True)
if result is not None:
out.write(result + '\n')
out.flush()
if once:
return
if should_stop is not None:
should_stop.wait(10.)
else:
time.sleep(10)
finally:
if not to_stdout:
out.close()
def ProcessOutput(filename):
"""Reads an output file, and returns a processed numpy array.
Args:
filename: (str) file to process.
Returns:
A numpy structured array.
"""
import numpy as np
entries = []
with open(filename, 'r') as f:
lines = f.readlines()
entries = [ParseResult(line) for line in lines]
result = np.array(entries,
dtype=[('warmup', np.int32),
('skip_launcher_activity', np.int32),
('speculation_mode', str),
('delay_to_may_launch_url', np.int32),
('delay_to_launch_url', np.int32),
('commit', np.int32), ('plt', np.int32),
('first_contentful_paint', np.int32)])
return result
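# Hedged post-processing sketch ('results.csv' is a placeholder file name;
# numpy must be importable at the call site):
# data = ProcessOutput('results.csv')
# valid_plt = data['plt'][data['plt'] != _INVALID_VALUE]
# print(np.median(valid_plt))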
def _CreateOptionParser():
parser = optparse.OptionParser(description='Loops Custom Tabs tests on a '
'device, and outputs the navigation timings '
'in a CSV file.')
parser.add_option('--device', help='Device ID')
parser.add_option('--speculated_url',
help='URL to call mayLaunchUrl() with.')
parser.add_option('--url', help='URL to navigate to.',
default='https://www.android.com')
parser.add_option('--parallel_url', help='URL to navigate to in parallel, '
'e.g. the start of the redirect chain.')
parser.add_option('--warmup', help='Call warmup.', default=False,
action='store_true')
parser.add_option('--skip_launcher_activity',
help='Skip ChromeLauncherActivity.', default=False,
action='store_true')
parser.add_option('--speculation_mode', default='prerender',
help='The speculation mode (prerender, '
'speculative_prefetch or no_state_prefetch).',
choices=['disabled', 'prerender', 'hidden_tab'])
parser.add_option('--delay_to_may_launch_url',
help='Delay before calling mayLaunchUrl() in ms.',
type='int', default=1000)
parser.add_option('--delay_to_launch_url',
help='Delay before calling launchUrl() in ms.',
type='int', default=-1)
parser.add_option('--cold', help='Purge the page cache before each run.',
default=False, action='store_true')
parser.add_option('--output_file', help='Output file (append). "-" for '
'stdout (this is the default)', default='-')
parser.add_option('--once', help='Run only one iteration.',
action='store_true', default=False)
parser.add_option('--pinning_benchmark',
help='Compare startup with/without a preliminary step '
'that pins a range of bytes in the APK into memory with '
'mlock(2).', default=False, action='store_true')
parser.add_option('--extra_brief_memory_mb', help='How much memory to '
'consume in foreground for --pinning_benchmark.',
type='int', default=0)
parser.add_option('--pin_filename', help='The file name on the device to pin '
'to memory.', default='')
parser.add_option('--pin_offset', help='The start offset of the range to be '
'pinned to memory.',
type='int', default=-1)
parser.add_option('--pin_length', help='The length of the range being pinned,'
' where 0 results in no pinning.',
type='int', default=-1)
return parser
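# Hedged invocation sketch (flags as defined above; the device serial is a
# placeholder):
# python <this script> --device <serial> --url https://www.android.com \
# --warmup --speculation_mode prerender --once --output_file results.csv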
def main():
parser = _CreateOptionParser()
options, _ = parser.parse_args()
devil_chromium.Initialize()
devices = device_utils.DeviceUtils.HealthyDevices()
device = devices[0]
if len(devices) != 1 and options.device is None:
logging.error('Several devices attached, must specify one with --device.')
sys.exit(0)
if options.device is not None:
matching_devices = [d for d in devices if str(d) == options.device]
if not matching_devices:
logging.error('Device not found.')
sys.exit(0)
device = matching_devices[0]
config = {
'url': options.url,
'skip_launcher_activity': options.skip_launcher_activity,
'speculated_url': options.speculated_url or options.url,
'parallel_url': options.parallel_url,
'warmup': options.warmup,
'speculation_mode': options.speculation_mode,
'delay_to_may_launch_url': options.delay_to_may_launch_url,
'delay_to_launch_url': options.delay_to_launch_url,
'cold': options.cold,
'pinning_benchmark': options.pinning_benchmark,
'pin_filename': options.pin_filename,
'pin_offset': options.pin_offset,
'pin_length': options.pin_length,
'extra_brief_memory_mb': options.extra_brief_memory_mb,
}
LoopOnDevice(device, [config], options.output_file, once=options.once)
if __name__ == '__main__':
main()
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains well known classes.
This file defines well known classes which need extra maintenance, including:
- Any
- Duration
- FieldMask
- Struct
- Timestamp
"""
__author__ = '[email protected] (Jie Luo)'
from datetime import datetime
from datetime import timedelta
import six
from google.protobuf.descriptor import FieldDescriptor
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
_NANOS_PER_SECOND = 1000000000
_NANOS_PER_MILLISECOND = 1000000
_NANOS_PER_MICROSECOND = 1000
_MILLIS_PER_SECOND = 1000
_MICROS_PER_SECOND = 1000000
_SECONDS_PER_DAY = 24 * 3600
_DURATION_SECONDS_MAX = 315576000000
class Error(Exception):
"""Top-level module error."""
class ParseError(Error):
"""Thrown in case of parsing error."""
class Any(object):
"""Class for Any Message type."""
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString()
def Unpack(self, msg):
"""Unpacks the current Any message into specified message."""
descriptor = msg.DESCRIPTOR
if not self.Is(descriptor):
return False
msg.ParseFromString(self.value)
return True
def TypeName(self):
"""Returns the protobuf type name of the inner message."""
# Only last part is to be used: b/25630112
return self.type_url.split('/')[-1]
def Is(self, descriptor):
"""Checks if this Any represents the given protobuf type."""
return self.TypeName() == descriptor.full_name
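# Hedged usage sketch (assumes the generated any_pb2.Any class, which picks up
# this mixin through WKTBASES at the bottom of this module):
# any_msg = any_pb2.Any()
# any_msg.Pack(duration_msg) # sets type_url and value
# any_msg.Is(duration_msg.DESCRIPTOR) # -> True
# any_msg.Unpack(target_msg) # -> True and parses on a type match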
class Timestamp(object):
"""Class for Timestamp message type."""
def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos
def FromJsonString(self, value):
"""Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems.
"""
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * _SECONDS_PER_DAY
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than '
'9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError('Failed to parse timestamp: invalid trailing'
' data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: {0}.'.format(timezone))
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
# Set seconds and nanos
self.seconds = int(seconds)
self.nanos = int(nanos)
def GetCurrentTime(self):
"""Get the current UTC into Timestamp."""
self.FromDatetime(datetime.utcnow())
def ToNanoseconds(self):
"""Converts Timestamp to nanoseconds since epoch."""
return self.seconds * _NANOS_PER_SECOND + self.nanos
def ToMicroseconds(self):
"""Converts Timestamp to microseconds since epoch."""
return (self.seconds * _MICROS_PER_SECOND +
self.nanos // _NANOS_PER_MICROSECOND)
def ToMilliseconds(self):
"""Converts Timestamp to milliseconds since epoch."""
return (self.seconds * _MILLIS_PER_SECOND +
self.nanos // _NANOS_PER_MILLISECOND)
def ToSeconds(self):
"""Converts Timestamp to seconds since epoch."""
return self.seconds
def FromNanoseconds(self, nanos):
"""Converts nanoseconds since epoch to Timestamp."""
self.seconds = nanos // _NANOS_PER_SECOND
self.nanos = nanos % _NANOS_PER_SECOND
def FromMicroseconds(self, micros):
"""Converts microseconds since epoch to Timestamp."""
self.seconds = micros // _MICROS_PER_SECOND
self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND
def FromMilliseconds(self, millis):
"""Converts milliseconds since epoch to Timestamp."""
self.seconds = millis // _MILLIS_PER_SECOND
self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND
def FromSeconds(self, seconds):
"""Converts seconds since epoch to Timestamp."""
self.seconds = seconds
self.nanos = 0
def ToDatetime(self):
"""Converts Timestamp to datetime."""
return datetime.utcfromtimestamp(
self.seconds + self.nanos / float(_NANOS_PER_SECOND))
def FromDatetime(self, dt):
"""Converts datetime to Timestamp."""
td = dt - datetime(1970, 1, 1)
self.seconds = td.seconds + td.days * _SECONDS_PER_DAY
self.nanos = td.microseconds * _NANOS_PER_MICROSECOND
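# Hedged round-trip sketch (assumes the generated timestamp_pb2.Timestamp,
# which uses this class as a mixin via WKTBASES below):
# ts = timestamp_pb2.Timestamp()
# ts.FromJsonString('1972-01-01T10:00:20.021Z')
# ts.ToMilliseconds() # -> 63108020021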
class Duration(object):
"""Class for Duration message type."""
def ToJsonString(self):
"""Converts Duration to string format.
Returns:
A string converted from self. The string format will contain
3, 6, or 9 fractional digits depending on the precision required to
represent the exact Duration value. For example: "1s", "1.010s",
"1.000000100s", "-3.100s"
"""
_CheckDurationValid(self.seconds, self.nanos)
if self.seconds < 0 or self.nanos < 0:
result = '-'
seconds = - self.seconds + int((0 - self.nanos) // 1e9)
nanos = (0 - self.nanos) % 1e9
else:
result = ''
seconds = self.seconds + int(self.nanos // 1e9)
nanos = self.nanos % 1e9
result += '%d' % seconds
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 's'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03ds' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06ds' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09ds' % nanos
def FromJsonString(self, value):
"""Converts a string to Duration.
Args:
value: A string to be converted. The string must end with 's'. Any
fractional digits (or none) are accepted as long as they fit into
precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s"
Raises:
ParseError: On parsing problems.
"""
if len(value) < 1 or value[-1] != 's':
raise ParseError(
'Duration must end with letter "s": {0}.'.format(value))
try:
pos = value.find('.')
if pos == -1:
seconds = int(value[:-1])
nanos = 0
else:
seconds = int(value[:pos])
if value[0] == '-':
nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9))
else:
nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9))
_CheckDurationValid(seconds, nanos)
self.seconds = seconds
self.nanos = nanos
except ValueError:
raise ParseError(
'Couldn\'t parse duration: {0}.'.format(value))
def ToNanoseconds(self):
"""Converts a Duration to nanoseconds."""
return self.seconds * _NANOS_PER_SECOND + self.nanos
def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros
def ToMilliseconds(self):
"""Converts a Duration to milliseconds."""
millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
return self.seconds * _MILLIS_PER_SECOND + millis
def ToSeconds(self):
"""Converts a Duration to seconds."""
return self.seconds
def FromNanoseconds(self, nanos):
"""Converts nanoseconds to Duration."""
self._NormalizeDuration(nanos // _NANOS_PER_SECOND,
nanos % _NANOS_PER_SECOND)
def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND)
def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND)
def FromSeconds(self, seconds):
"""Converts seconds to Duration."""
self.seconds = seconds
self.nanos = 0
def ToTimedelta(self):
"""Converts Duration to timedelta."""
return timedelta(
seconds=self.seconds, microseconds=_RoundTowardZero(
self.nanos, _NANOS_PER_MICROSECOND))
def FromTimedelta(self, td):
"""Convertd timedelta to Duration."""
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,
td.microseconds * _NANOS_PER_MICROSECOND)
def _NormalizeDuration(self, seconds, nanos):
"""Set Duration by seconds and nonas."""
# Force nanos to be negative if the duration is negative.
if seconds < 0 and nanos > 0:
seconds += 1
nanos -= _NANOS_PER_SECOND
self.seconds = seconds
self.nanos = nanos
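# Hedged round-trip sketch (assumes the generated duration_pb2.Duration,
# which uses this class as a mixin via WKTBASES below):
# d = duration_pb2.Duration()
# d.FromJsonString('1.010s') # seconds=1, nanos=10000000
# d.ToMilliseconds() # -> 1010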
def _CheckDurationValid(seconds, nanos):
if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX:
raise Error(
'Duration is not valid: Seconds {0} must be in range '
'[-315576000000, 315576000000].'.format(seconds))
if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND:
raise Error(
'Duration is not valid: Nanos {0} must be in range '
'[-999999999, 999999999].'.format(nanos))
def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languages, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result
class FieldMask(object):
"""Class for FieldMask message type."""
def ToJsonString(self):
"""Converts FieldMask to string according to proto3 JSON spec."""
camelcase_paths = []
for path in self.paths:
camelcase_paths.append(_SnakeCaseToCamelCase(path))
return ','.join(camelcase_paths)
def FromJsonString(self, value):
"""Converts string to FieldMask according to proto3 JSON spec."""
self.Clear()
for path in value.split(','):
self.paths.append(_CamelCaseToSnakeCase(path))
def IsValidForDescriptor(self, message_descriptor):
"""Checks whether the FieldMask is valid for Message Descriptor."""
for path in self.paths:
if not _IsValidPath(message_descriptor, path):
return False
return True
def AllFieldsFromDescriptor(self, message_descriptor):
"""Gets all direct fields of Message Descriptor to FieldMask."""
self.Clear()
for field in message_descriptor.fields:
self.paths.append(field.name)
def CanonicalFormFromMask(self, mask):
"""Converts a FieldMask to the canonical form.
Removes paths that are covered by another path. For example,
"foo.bar" is covered by "foo" and will be removed if "foo"
is also in the FieldMask. Then sorts all paths in alphabetical order.
Args:
mask: The original FieldMask to be converted.
"""
tree = _FieldMaskTree(mask)
tree.ToFieldMask(self)
def Union(self, mask1, mask2):
"""Merges mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
tree.MergeFromFieldMask(mask2)
tree.ToFieldMask(self)
def Intersect(self, mask1, mask2):
"""Intersects mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
intersection = _FieldMaskTree()
for path in mask2.paths:
tree.IntersectPath(path, intersection)
intersection.ToFieldMask(self)
def MergeMessage(
self, source, destination,
replace_message_field=False, replace_repeated_field=False):
"""Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field if True. Append
elements of repeated field if False.
"""
tree = _FieldMaskTree(self)
tree.MergeMessage(
source, destination, replace_message_field, replace_repeated_field)
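# Hedged usage sketch (assumes the generated field_mask_pb2.FieldMask, which
# uses this class as a mixin via WKTBASES below):
# mask = field_mask_pb2.FieldMask(paths=['foo.bar', 'foo'])
# canonical = field_mask_pb2.FieldMask()
# canonical.CanonicalFormFromMask(mask) # 'foo.bar' is covered by 'foo'
# canonical.ToJsonString() # -> 'foo'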
def _IsValidPath(message_descriptor, path):
"""Checks whether the path is valid for Message Descriptor."""
parts = path.split('.')
last = parts.pop()
for name in parts:
field = message_descriptor.fields_by_name[name]
if (field is None or
field.label == FieldDescriptor.LABEL_REPEATED or
field.type != FieldDescriptor.TYPE_MESSAGE):
return False
message_descriptor = field.message_type
return last in message_descriptor.fields_by_name
def _CheckFieldMaskMessage(message):
"""Raises ValueError if message is not a FieldMask."""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.name != 'FieldMask' or
message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
raise ValueError('Message {0} is not a FieldMask.'.format(
message_descriptor.full_name))
def _SnakeCaseToCamelCase(path_name):
"""Converts a path name from snake_case to camelCase."""
result = []
after_underscore = False
for c in path_name:
if c.isupper():
raise Error('Fail to print FieldMask to Json string: Path name '
'{0} must not contain uppercase letters.'.format(path_name))
if after_underscore:
if c.islower():
result.append(c.upper())
after_underscore = False
else:
raise Error('Fail to print FieldMask to Json string: The '
'character after a "_" must be a lowercase letter '
'in path name {0}.'.format(path_name))
elif c == '_':
after_underscore = True
else:
result += c
if after_underscore:
raise Error('Fail to print FieldMask to Json string: Trailing "_" '
'in path name {0}.'.format(path_name))
return ''.join(result)
def _CamelCaseToSnakeCase(path_name):
"""Converts a field name from camelCase to snake_case."""
result = []
for c in path_name:
if c == '_':
raise ParseError('Fail to parse FieldMask: Path name '
'{0} must not contain "_"s.'.format(path_name))
if c.isupper():
result += '_'
result += c.lower()
else:
result += c
return ''.join(result)
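# Hedged examples of the two helpers above:
# _SnakeCaseToCamelCase('foo_bar') # -> 'fooBar'
# _CamelCaseToSnakeCase('fooBar') # -> 'foo_bar'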
class _FieldMaskTree(object):
"""Represents a FieldMask in a tree structure.
For example, given a FieldMask "foo.bar,foo.baz,bar.baz",
the FieldMaskTree will be:
[_root] -+- foo -+- bar
| |
| +- baz
|
+- bar --- baz
In the tree, each leaf node represents a field path.
"""
def __init__(self, field_mask=None):
"""Initializes the tree by FieldMask."""
self._root = {}
if field_mask:
self.MergeFromFieldMask(field_mask)
def MergeFromFieldMask(self, field_mask):
"""Merges a FieldMask to the tree."""
for path in field_mask.paths:
self.AddPath(path)
def AddPath(self, path):
"""Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
will be turned into a leaf node with all its children removed because
the path matches all the node's children. Otherwise, a new path will
be added.
Args:
path: The field path to add.
"""
node = self._root
for name in path.split('.'):
if name not in node:
node[name] = {}
elif not node[name]:
# Pre-existing empty node implies we already have this entire tree.
return
node = node[name]
# Remove any sub-trees we might have had.
node.clear()
def ToFieldMask(self, field_mask):
"""Converts the tree to a FieldMask."""
field_mask.Clear()
_AddFieldPaths(self._root, '', field_mask)
def IntersectPath(self, path, intersection):
"""Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculate.
intersection: The out tree to record the intersection part.
"""
node = self._root
for name in path.split('.'):
if name not in node:
return
elif not node[name]:
intersection.AddPath(path)
return
node = node[name]
intersection.AddLeafNodes(path, node)
def AddLeafNodes(self, prefix, node):
"""Adds leaf nodes begin with prefix to this tree."""
if not node:
self.AddPath(prefix)
for name in node:
child_path = prefix + '.' + name
self.AddLeafNodes(child_path, node[name])
def MergeMessage(
self, source, destination,
replace_message, replace_repeated):
"""Merge all fields specified by this tree from source to destination."""
_MergeMessage(
self._root, source, destination, replace_message, replace_repeated)
def _StrConvert(value):
"""Converts value to str if it is not."""
# This file is imported by the C extension, and some methods like ClearField
# require a string for the field name. py2/py3 have different text
# types and may use unicode.
if not isinstance(value, str):
return value.encode('utf-8')
return value
def _MergeMessage(
node, source, destination, replace_message, replace_repeated):
"""Merge all fields specified by a sub-tree from source to destination."""
source_descriptor = source.DESCRIPTOR
for name in node:
child = node[name]
field = source_descriptor.fields_by_name[name]
if field is None:
raise ValueError('Error: Can\'t find field {0} in message {1}.'.format(
name, source_descriptor.full_name))
if child:
# Sub-paths are only allowed for singular message fields.
if (field.label == FieldDescriptor.LABEL_REPEATED or
field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE):
raise ValueError('Error: Field {0} in message {1} is not a singular '
'message field and cannot have sub-fields.'.format(
name, source_descriptor.full_name))
_MergeMessage(
child, getattr(source, name), getattr(destination, name),
replace_message, replace_repeated)
continue
if field.label == FieldDescriptor.LABEL_REPEATED:
if replace_repeated:
destination.ClearField(_StrConvert(name))
repeated_source = getattr(source, name)
repeated_destination = getattr(destination, name)
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
for item in repeated_source:
repeated_destination.add().MergeFrom(item)
else:
repeated_destination.extend(repeated_source)
else:
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
if replace_message:
destination.ClearField(_StrConvert(name))
if source.HasField(name):
getattr(destination, name).MergeFrom(getattr(source, name))
else:
setattr(destination, name, getattr(source, name))
def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:
child_path = name
_AddFieldPaths(node[name], child_path, field_mask)
_INT_OR_FLOAT = six.integer_types + (float,)
def _SetStructValue(struct_value, value):
if value is None:
struct_value.null_value = 0
elif isinstance(value, bool):
# Note: this check must come before the number check because in Python
# True and False are also considered numbers.
struct_value.bool_value = value
elif isinstance(value, six.string_types):
struct_value.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
struct_value.number_value = value
else:
raise ValueError('Unexpected type')
def _GetStructValue(struct_value):
which = struct_value.WhichOneof('kind')
if which == 'struct_value':
return struct_value.struct_value
elif which == 'null_value':
return None
elif which == 'number_value':
return struct_value.number_value
elif which == 'string_value':
return struct_value.string_value
elif which == 'bool_value':
return struct_value.bool_value
elif which == 'list_value':
return struct_value.list_value
elif which is None:
raise ValueError('Value not set')
class Struct(object):
"""Class for Struct message type."""
__slots__ = []
def __getitem__(self, key):
return _GetStructValue(self.fields[key])
def __setitem__(self, key, value):
_SetStructValue(self.fields[key], value)
def get_or_create_list(self, key):
"""Returns a list for this key, creating if it didn't exist already."""
return self.fields[key].list_value
def get_or_create_struct(self, key):
"""Returns a struct for this key, creating if it didn't exist already."""
return self.fields[key].struct_value
# TODO(haberman): allow constructing/merging from dict.
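# Hedged usage sketch (assumes the generated struct_pb2.Struct, which uses this
# class as a mixin via WKTBASES below):
# s = struct_pb2.Struct()
# s['name'] = 'example' # routed through _SetStructValue
# s['score'] = 1.5
# s['name'] # -> 'example', via _GetStructValue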
class ListValue(object):
"""Class for ListValue message type."""
def __len__(self):
return len(self.values)
def append(self, value):
_SetStructValue(self.values.add(), value)
def extend(self, elem_seq):
for value in elem_seq:
self.append(value)
def __getitem__(self, index):
"""Retrieves item by the specified index."""
return _GetStructValue(self.values.__getitem__(index))
def __setitem__(self, index, value):
_SetStructValue(self.values.__getitem__(index), value)
def items(self):
for i in range(len(self)):
yield self[i]
def add_struct(self):
"""Appends and returns a struct value as the next value in the list."""
return self.values.add().struct_value
def add_list(self):
"""Appends and returns a list value as the next value in the list."""
return self.values.add().list_value
WKTBASES = {
'google.protobuf.Any': Any,
'google.protobuf.Duration': Duration,
'google.protobuf.FieldMask': FieldMask,
'google.protobuf.ListValue': ListValue,
'google.protobuf.Struct': Struct,
'google.protobuf.Timestamp': Timestamp,
}
|
|
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.compat import asbytes
from numpy.core.test_rational import *
class TestUfunc(TestCase):
def test_pickle(self):
import pickle
assert pickle.loads(pickle.dumps(np.sin)) is np.sin
def test_pickle_withstring(self):
import pickle
astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert pickle.loads(astring) is np.cos
def test_reduceat_shifting_sum(self) :
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
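# Hedged note: idx is [0, 2, 1, 3, 2, 4, 3, 5], so the even slots of
# np.add.reduceat(x, idx) are x[0:2].sum(), x[1:3].sum(), x[2:4].sum() and
# x[3:5].sum(), i.e. [1, 3, 5, 7] for x = arange(6).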
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_generic_loops(self) :
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
Fixme, currently untested:
PyUFunc_ff_f_As_dd_d
PyUFunc_FF_F_As_DD_D
PyUFunc_f_f_As_d_d
PyUFunc_F_F_As_D_D
PyUFunc_On_Om
"""
fone = np.exp
ftwo = lambda x, y : x**y
fone_val = 1
ftwo_val = 1
# check unary PyUFunc_f_f.
msg = "PyUFunc_f_f"
x = np.zeros(10, dtype=np.single)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_d_d.
msg = "PyUFunc_d_d"
x = np.zeros(10, dtype=np.double)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_g_g.
msg = "PyUFunc_g_g"
x = np.zeros(10, dtype=np.longdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_F_F.
msg = "PyUFunc_F_F"
x = np.zeros(10, dtype=np.csingle)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_D_D.
msg = "PyUFunc_D_D"
x = np.zeros(10, dtype=np.cdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_G_G.
msg = "PyUFunc_G_G"
x = np.zeros(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check binary PyUFunc_ff_f.
msg = "PyUFunc_ff_f"
x = np.ones(10, dtype=np.single)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_dd_d.
msg = "PyUFunc_dd_d"
x = np.ones(10, dtype=np.double)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_gg_g.
msg = "PyUFunc_gg_g"
x = np.ones(10, dtype=np.longdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_FF_F.
msg = "PyUFunc_FF_F"
x = np.ones(10, dtype=np.csingle)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_DD_D.
msg = "PyUFunc_DD_D"
x = np.ones(10, dtype=np.cdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_GG_G.
msg = "PyUFunc_GG_G"
x = np.ones(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# class to use in testing object method loops
class foo(object):
def conjugate(self) :
return np.bool_(1)
def logical_xor(self, obj) :
return np.bool_(1)
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
# check PyUFunc_On_Om
# fixme -- I don't know how to do this yet
def test_all_ufunc(self) :
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
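    # A minimal, hedged sketch of the true_divide note in the table above:
    # byte/short inputs resolve to the float32 loop ("bBhH -> f") and
    # int32/int64 inputs to the float64 loop ("iIlLqQ -> d").  Named with a
    # leading underscore so the test runner does not collect it.
    def _true_divide_casting_sketch(self):
        a8 = np.arange(1, 4, dtype=np.int8)
        a64 = np.arange(1, 4, dtype=np.int64)
        assert_equal(np.true_divide(a8, a8).dtype, np.dtype('float32'))
        assert_equal(np.true_divide(a64, a64).dtype, np.dtype('float64'))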
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
# pass. empty core signature; treat as plain ufunc (with trivial core)
assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
# in the following calls, a ValueError should be raised because
# of error in core signature
# error: extra parenthesis
msg = "core_sig: extra parenthesis"
try:
ret = umt.test_signature(2, 1, "((i)),(i)->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError:
            pass
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
ret = umt.test_signature(2, 1, "(i),)i(->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError:
            pass
        # error: incomplete signature. letters outside of parentheses are ignored
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 1, "(i),->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError:
            pass
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 2, "(i),(i)->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError:
            pass
# more complicated names for variables
assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast in core dimensions"
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast core and loop dimensions"
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast should fail"
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
try:
ret = umt.inner1d(a, b)
assert_equal(ret, None, err_msg=msg)
        except ValueError:
            pass
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a+0.1
assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0,:, 0,:, 0]
b = x[:, 1,:, 1,:, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
def compare_matrix_multiply_results(self, tp):
d1 = np.array(rand(2, 3, 4), dtype=tp)
d2 = np.array(rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
                        ref = ref and a1.base is not None
                        ref = ref and a2.base is not None
if broadcastable(a1.shape[-1], a2.shape[-2]) and \
broadcastable(a1.shape[0], a2.shape[0]):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg = msg+' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
def test_where_param_buffer_output(self):
# This test is temporarily skipped because it requires
# adding masking features to the nditer to work properly
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def check_identityless_reduction(self, a):
        # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have a n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values, it just checks to
        # make sure that we get an error in exactly those cases where we
# expect one, and assumes the calculations themselves are done
# correctly.
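        # A quick, hedged concrete check of the "n > 0, m = 0" case described
        # above: with an identity the reduction succeeds, without one it raises.
        np.add.reduce(np.zeros((3, 0)), axis=1)
        assert_raises(ValueError, np.maximum.reduce, np.zeros((3, 0)), axis=1)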
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n //2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In some future version, 'same_kind' will become the
# default.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_warns(DeprecationWarning, np.add, a, 1.1, out=a)
assert_array_equal(a, [2, 3, 4])
def add_inplace(a, b):
a += b
assert_warns(DeprecationWarning, add_inplace, a, 1.1)
assert_array_equal(a, [3, 4, 5])
# Make sure that explicitly overriding the warning is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [4, 5, 6])
# There's no way to propagate exceptions from the place where we issue
# this deprecation warning, so we must throw the exception away
# entirely rather than cause it to be raised at some other point, or
# trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some
# other location entirely.
import warnings
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
with warnings.catch_warnings():
warnings.simplefilter("error")
old_stderr = sys.stderr
try:
sys.stderr = StringIO()
# No error, but dumps to stderr
a += 1.1
# No error on the next bit of code executed either
1 + 1
assert_("Implicitly casting" in sys.stderr.getvalue())
finally:
sys.stderr = old_stderr
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = test_add(a, b, c)
assert_equal(result, np.array([0, 2, 4], dtype=rational))
# no output type should raise TypeError
assert_raises(TypeError, test_add, a, b)
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core.struct_ufunc_test as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
def test_custom_ufunc(self):
        a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        result = test_add_rationals(a, b)
        expected = np.array([rational(1), rational(2, 3), rational(1, 2)],
                            dtype=rational)
        assert_equal(result, expected)
def test_custom_array_like(self):
class MyThing(object):
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > len(self.shape):
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2 ],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11 ],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20 ],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=np.object)
self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=np.object))
if __name__ == "__main__":
run_module_suite()
#!/usr/bin/python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# COVERAGE.PY -- COVERAGE TESTING
#
# Gareth Rees, Ravenbrook Limited, 2001-12-04
# Ned Batchelder, 2004-12-12
# http://nedbatchelder.com/code/modules/coverage.html
#
#
# 1. INTRODUCTION
#
# This module provides coverage testing for Python code.
#
# The intended readership is all Python developers.
#
# This document is not confidential.
#
# See [GDR 2001-12-04a] for the command-line interface, programmatic
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
r"""Usage:
coverage.py -x [-p] MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
coverage data. With the -p option, write to a temporary file containing
the machine name and process ID.
coverage.py -e
Erase collected coverage data.
coverage.py -c
Collect data from multiple coverage files (as created by -p option above)
and store it into a single file representing the union of the coverage.
coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
coverage.py -a [-d dir] [-o dir1,dir2,...] FILE1 FILE2 ...
Make annotated copies of the given files, marking statements that
are executed with > and statements that are missed with !. With
the -d option, make the copies in that directory. Without the -d
option, make each copy in the same directory as the original.
-o dir,dir2,...
Omit reporting or annotating files when their filename path starts with
a directory listed in the omit list.
e.g. python coverage.py -i -r -o c:\python23,lib\enthought\traits
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
__version__ = "2.85.20080914" # see detailed history at the end of this file.
import compiler
import compiler.visitor
import glob
import os
import re
import string
import symbol
import sys
import threading
import token
import types
import zipimport
from socket import gethostname
# Python version compatibility
try:
strclass = basestring # new to 2.3
except:
strclass = str
# 2. IMPLEMENTATION
#
# This uses the "singleton" pattern.
#
# The word "morf" means a module object (from which the source file can
# be deduced by suitable manipulation of the __file__ attribute) or a
# filename.
#
# When we generate a coverage report we have to canonicalize every
# filename in the coverage dictionary just in case it refers to the
# module we are reporting on. It seems a shame to throw away this
# information so the data in the coverage dictionary is transferred to
# the 'cexecuted' dictionary under the canonical filenames.
#
# The coverage dictionary is called "c" and the trace function "t". The
# reason for these short names is that Python looks up variables by name
# at runtime and so execution time depends on the length of variables!
# In the bottleneck of this application it's appropriate to abbreviate
# names to increase speed.
class StatementFindingAstVisitor(compiler.visitor.ASTVisitor):
""" A visitor for a parsed Abstract Syntax Tree which finds executable
statements.
"""
def __init__(self, statements, excluded, suite_spots):
compiler.visitor.ASTVisitor.__init__(self)
self.statements = statements
self.excluded = excluded
self.suite_spots = suite_spots
self.excluding_suite = 0
def doRecursive(self, node):
for n in node.getChildNodes():
self.dispatch(n)
visitStmt = visitModule = doRecursive
def doCode(self, node):
if hasattr(node, 'decorators') and node.decorators:
self.dispatch(node.decorators)
self.recordAndDispatch(node.code)
else:
self.doSuite(node, node.code)
visitFunction = visitClass = doCode
def getFirstLine(self, node):
# Find the first line in the tree node.
lineno = node.lineno
for n in node.getChildNodes():
f = self.getFirstLine(n)
if lineno and f:
lineno = min(lineno, f)
else:
lineno = lineno or f
return lineno
def getLastLine(self, node):
        # Find the last line in the tree node.
lineno = node.lineno
for n in node.getChildNodes():
lineno = max(lineno, self.getLastLine(n))
return lineno
def doStatement(self, node):
self.recordLine(self.getFirstLine(node))
visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
doStatement
def visitPass(self, node):
# Pass statements have weird interactions with docstrings. If this
# pass statement is part of one of those pairs, claim that the statement
# is on the later of the two lines.
l = node.lineno
if l:
lines = self.suite_spots.get(l, [l,l])
self.statements[lines[1]] = 1
def visitDiscard(self, node):
# Discard nodes are statements that execute an expression, but then
# discard the results. This includes function calls, so we can't
# ignore them all. But if the expression is a constant, the statement
# won't be "executed", so don't count it now.
if node.expr.__class__.__name__ != 'Const':
self.doStatement(node)
def recordNodeLine(self, node):
        # Stmt nodes often have a lineno of None, but shouldn't claim the first line of
# their children (because the first child might be an ignorable line
# like "global a").
if node.__class__.__name__ != 'Stmt':
return self.recordLine(self.getFirstLine(node))
else:
return 0
def recordLine(self, lineno):
# Returns a bool, whether the line is included or excluded.
if lineno:
# Multi-line tests introducing suites have to get charged to their
# keyword.
if lineno in self.suite_spots:
lineno = self.suite_spots[lineno][0]
# If we're inside an excluded suite, record that this line was
# excluded.
if self.excluding_suite:
self.excluded[lineno] = 1
return 0
# If this line is excluded, or suite_spots maps this line to
            # another line that is excluded, then we're excluded.
elif self.excluded.has_key(lineno) or \
self.suite_spots.has_key(lineno) and \
self.excluded.has_key(self.suite_spots[lineno][1]):
return 0
# Otherwise, this is an executable line.
else:
self.statements[lineno] = 1
return 1
return 0
default = recordNodeLine
def recordAndDispatch(self, node):
self.recordNodeLine(node)
self.dispatch(node)
def doSuite(self, intro, body, exclude=0):
exsuite = self.excluding_suite
if exclude or (intro and not self.recordNodeLine(intro)):
self.excluding_suite = 1
self.recordAndDispatch(body)
self.excluding_suite = exsuite
def doPlainWordSuite(self, prevsuite, suite):
# Finding the exclude lines for else's is tricky, because they aren't
# present in the compiler parse tree. Look at the previous suite,
# and find its last line. If any line between there and the else's
        # first line is excluded, then we exclude the else.
lastprev = self.getLastLine(prevsuite)
firstelse = self.getFirstLine(suite)
for l in range(lastprev+1, firstelse):
if self.suite_spots.has_key(l):
self.doSuite(None, suite, exclude=self.excluded.has_key(l))
break
else:
self.doSuite(None, suite)
def doElse(self, prevsuite, node):
if node.else_:
self.doPlainWordSuite(prevsuite, node.else_)
def visitFor(self, node):
self.doSuite(node, node.body)
self.doElse(node.body, node)
visitWhile = visitFor
def visitIf(self, node):
# The first test has to be handled separately from the rest.
# The first test is credited to the line with the "if", but the others
# are credited to the line with the test for the elif.
self.doSuite(node, node.tests[0][1])
for t, n in node.tests[1:]:
self.doSuite(t, n)
self.doElse(node.tests[-1][1], node)
def visitTryExcept(self, node):
self.doSuite(node, node.body)
for i in range(len(node.handlers)):
a, b, h = node.handlers[i]
if not a:
# It's a plain "except:". Find the previous suite.
if i > 0:
prev = node.handlers[i-1][2]
else:
prev = node.body
self.doPlainWordSuite(prev, h)
else:
self.doSuite(a, h)
self.doElse(node.handlers[-1][2], node)
def visitTryFinally(self, node):
self.doSuite(node, node.body)
self.doPlainWordSuite(node.body, node.final)
def visitWith(self, node):
self.doSuite(node, node.body)
def visitGlobal(self, node):
# "global" statements don't execute like others (they don't call the
# trace function), so don't record their line numbers.
pass
the_coverage = None
class CoverageException(Exception):
pass
class coverage:
# Name of the cache file (unless environment variable is set).
cache_default = ".coverage"
# Environment variable naming the cache file.
cache_env = "COVERAGE_FILE"
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
cexecuted = {}
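    # Hedged illustration of the two shapes above (paths and line numbers are
    # invented):
    #     c         = {('/abs/path/mod.py', 12): 1, ('/abs/path/mod.py', 13): 1}
    #     cexecuted = {'/abs/path/mod.py': {12: 1, 13: 1}}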
# Cache of results of calling the analysis2() method, so that you can
# specify both -r and -a without doing double work.
analysis_cache = {}
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
canonical_filename_cache = {}
def __init__(self):
global the_coverage
if the_coverage:
raise CoverageException("Only one coverage object allowed.")
self.usecache = 1
self.cache = None
self.parallel_mode = False
self.exclude_re = ''
self.nesting = 0
self.cstack = []
self.xstack = []
self.relative_dir = self.abs_file(os.curdir)+os.sep
self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
# t(f, x, y). This method is passed to sys.settrace as a trace function.
# See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
# the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
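    # Hedged reminder of the trace protocol assumed here: the interpreter calls
    # t(frame, event, arg) for events such as 'call', 'line' and 'return';
    # start() installs it with
    #     sys.settrace(self.t)
    # and returning self.t keeps tracing the frame, so 'line' events accumulate
    # in self.c.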
def t(self, f, w, unused): #pragma: no cover
if w == 'line':
self.c[(f.f_code.co_filename, f.f_lineno)] = 1
#-for c in self.cstack:
#- c[(f.f_code.co_filename, f.f_lineno)] = 1
return self.t
def help(self, error=None): #pragma: no cover
if error:
print error
print
print __doc__
sys.exit(1)
def command_line(self, argv, help_fn=None):
import getopt
help_fn = help_fn or self.help
settings = {}
optmap = {
'-a': 'annotate',
'-c': 'collect',
'-d:': 'directory=',
'-e': 'erase',
'-h': 'help',
'-i': 'ignore-errors',
'-m': 'show-missing',
'-p': 'parallel-mode',
'-r': 'report',
'-x': 'execute',
'-o:': 'omit=',
}
short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
long_opts = optmap.values()
options, args = getopt.getopt(argv, short_opts, long_opts)
for o, a in options:
if optmap.has_key(o):
settings[optmap[o]] = 1
elif optmap.has_key(o + ':'):
settings[optmap[o + ':']] = a
elif o[2:] in long_opts:
settings[o[2:]] = 1
elif o[2:] + '=' in long_opts:
settings[o[2:]+'='] = a
else: #pragma: no cover
pass # Can't get here, because getopt won't return anything unknown.
if settings.get('help'):
help_fn()
for i in ['erase', 'execute']:
for j in ['annotate', 'report', 'collect']:
if settings.get(i) and settings.get(j):
help_fn("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
args_needed = (settings.get('execute')
or settings.get('annotate')
or settings.get('report'))
action = (settings.get('erase')
or settings.get('collect')
or args_needed)
if not action:
help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
if not args_needed and args:
help_fn("Unexpected arguments: %s" % " ".join(args))
self.parallel_mode = settings.get('parallel-mode')
self.get_ready()
if settings.get('erase'):
self.erase()
if settings.get('execute'):
if not args:
help_fn("Nothing to do.")
sys.argv = args
self.start()
import __main__
sys.path[0] = os.path.dirname(sys.argv[0])
execfile(sys.argv[0], __main__.__dict__)
if settings.get('collect'):
self.collect()
if not args:
args = self.cexecuted.keys()
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
directory = settings.get('directory=')
omit = settings.get('omit=')
if omit is not None:
omit = [self.abs_file(p) for p in omit.split(',')]
else:
omit = []
if settings.get('report'):
self.report(args, show_missing, ignore_errors, omit_prefixes=omit)
if settings.get('annotate'):
self.annotate(args, directory, ignore_errors, omit_prefixes=omit)
def use_cache(self, usecache, cache_file=None):
self.usecache = usecache
if cache_file and not self.cache:
self.cache_default = cache_file
def get_ready(self, parallel_mode=False):
if self.usecache and not self.cache:
self.cache = os.environ.get(self.cache_env, self.cache_default)
if self.parallel_mode:
self.cache += "." + gethostname() + "." + str(os.getpid())
self.restore()
self.analysis_cache = {}
def start(self, parallel_mode=False):
self.get_ready()
if self.nesting == 0: #pragma: no cover
sys.settrace(self.t)
if hasattr(threading, 'settrace'):
threading.settrace(self.t)
self.nesting += 1
def stop(self):
self.nesting -= 1
if self.nesting == 0: #pragma: no cover
sys.settrace(None)
if hasattr(threading, 'settrace'):
threading.settrace(None)
def erase(self):
self.get_ready()
self.c = {}
self.analysis_cache = {}
self.cexecuted = {}
if self.cache and os.path.exists(self.cache):
os.remove(self.cache)
def exclude(self, re):
if self.exclude_re:
self.exclude_re += "|"
self.exclude_re += "(" + re + ")"
def begin_recursive(self):
self.cstack.append(self.c)
self.xstack.append(self.exclude_re)
def end_recursive(self):
self.c = self.cstack.pop()
self.exclude_re = self.xstack.pop()
# save(). Save coverage data to the coverage cache.
def save(self):
if self.usecache and self.cache:
self.canonicalize_filenames()
cache = open(self.cache, 'wb')
import marshal
marshal.dump(self.cexecuted, cache)
cache.close()
# restore(). Restore coverage data from the coverage cache (if it exists).
def restore(self):
self.c = {}
self.cexecuted = {}
assert self.usecache
if os.path.exists(self.cache):
self.cexecuted = self.restore_file(self.cache)
def restore_file(self, file_name):
try:
cache = open(file_name, 'rb')
import marshal
cexecuted = marshal.load(cache)
cache.close()
if isinstance(cexecuted, types.DictType):
return cexecuted
else:
return {}
except:
return {}
# collect(). Collect data in multiple files produced by parallel mode
def collect(self):
cache_dir, local = os.path.split(self.cache)
for f in os.listdir(cache_dir or '.'):
if not f.startswith(local):
continue
full_path = os.path.join(cache_dir, f)
cexecuted = self.restore_file(full_path)
self.merge_data(cexecuted)
def merge_data(self, new_data):
for file_name, file_data in new_data.items():
if self.cexecuted.has_key(file_name):
self.merge_file_data(self.cexecuted[file_name], file_data)
else:
self.cexecuted[file_name] = file_data
def merge_file_data(self, cache_data, new_data):
for line_number in new_data.keys():
if not cache_data.has_key(line_number):
cache_data[line_number] = new_data[line_number]
def abs_file(self, filename):
""" Helper function to turn a filename into an absolute normalized
filename.
"""
return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
def get_zip_data(self, filename):
""" Get data from `filename` if it is a zip file path, or return None
if it is not.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
# canonical_filename(filename). Return a canonical filename for the
# file (that is, an absolute path with no redundant components and
# normalized case). See [GDR 2001-12-04b, 3.3].
def canonical_filename(self, filename):
if not self.canonical_filename_cache.has_key(filename):
f = filename
if os.path.isabs(f) and not os.path.exists(f):
if not self.get_zip_data(f):
f = os.path.basename(f)
if not os.path.isabs(f):
for path in [os.curdir] + sys.path:
g = os.path.join(path, f)
if os.path.exists(g):
f = g
break
cf = self.abs_file(f)
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
# canonicalize_filenames(). Copy results from "c" to "cexecuted",
# canonicalizing filenames on the way. Clear the "c" map.
def canonicalize_filenames(self):
for filename, lineno in self.c.keys():
if filename == '<string>':
# Can't do anything useful with exec'd strings, so skip them.
continue
f = self.canonical_filename(filename)
if not self.cexecuted.has_key(f):
self.cexecuted[f] = {}
self.cexecuted[f][lineno] = 1
self.c = {}
# morf_filename(morf). Return the filename for a module or file.
def morf_filename(self, morf):
if hasattr(morf, '__file__'):
f = morf.__file__
else:
f = morf
return self.canonical_filename(f)
# analyze_morf(morf). Analyze the module or filename passed as
# the argument. If the source code can't be found, raise an error.
# Otherwise, return a tuple of (1) the canonical filename of the
# source code for the module, (2) a list of lines of statements
# in the source code, (3) a list of lines of excluded statements,
# and (4), a map of line numbers to multi-line line number ranges, for
# statements that cross lines.
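    # Hedged illustration of that return shape (all values invented):
    #     ('/abs/path/mod.py',                   # canonical filename
    #      [1, 2, 5, 9],                         # executable statement lines
    #      [7],                                  # excluded lines
    #      {2: (2, 4), 3: (2, 4), 4: (2, 4)})    # multi-line statement ranges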
def analyze_morf(self, morf):
if self.analysis_cache.has_key(morf):
return self.analysis_cache[morf]
filename = self.morf_filename(morf)
ext = os.path.splitext(filename)[1]
source, sourcef = None, None
if ext == '.pyc':
if not os.path.exists(filename[:-1]):
source = self.get_zip_data(filename[:-1])
if not source:
raise CoverageException(
"No source for compiled code '%s'." % filename
)
filename = filename[:-1]
if not source:
sourcef = open(filename, 'rU')
source = sourcef.read()
try:
lines, excluded_lines, line_map = self.find_executable_statements(
source, exclude=self.exclude_re
)
except SyntaxError, synerr:
raise CoverageException(
"Couldn't parse '%s' as Python source: '%s' at line %d" %
(filename, synerr.msg, synerr.lineno)
)
if sourcef:
sourcef.close()
result = filename, lines, excluded_lines, line_map
self.analysis_cache[morf] = result
return result
def first_line_of_tree(self, tree):
while True:
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[1]
def last_line_of_tree(self, tree):
while True:
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[-1]
def find_docstring_pass_pair(self, tree, spots):
for i in range(1, len(tree)):
if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i+1])
self.record_multiline(spots, first_line, last_line)
def is_string_constant(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt
except:
return False
def is_pass_stmt(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt
except:
return False
def record_multiline(self, spots, i, j):
for l in range(i, j+1):
spots[l] = (i, j)
def get_suite_spots(self, tree, spots):
""" Analyze a parse tree to find suite introducers which span a number
of lines.
"""
for i in range(1, len(tree)):
if type(tree[i]) == type(()):
if tree[i][0] == symbol.suite:
# Found a suite, look back for the colon and keyword.
lineno_colon = lineno_word = None
for j in range(i-1, 0, -1):
if tree[j][0] == token.COLON:
# Colons are never executed themselves: we want the
# line number of the last token before the colon.
lineno_colon = self.last_line_of_tree(tree[j-1])
elif tree[j][0] == token.NAME:
if tree[j][1] == 'elif':
# Find the line number of the first non-terminal
# after the keyword.
t = tree[j+1]
while t and token.ISNONTERMINAL(t[0]):
t = t[1]
if t:
lineno_word = t[2]
else:
lineno_word = tree[j][2]
break
elif tree[j][0] == symbol.except_clause:
# "except" clauses look like:
# ('except_clause', ('NAME', 'except', lineno), ...)
if tree[j][1][0] == token.NAME:
lineno_word = tree[j][1][2]
break
if lineno_colon and lineno_word:
# Found colon and keyword, mark all the lines
# between the two with the two line numbers.
self.record_multiline(spots, lineno_word, lineno_colon)
# "pass" statements are tricky: different versions of Python
# treat them differently, especially in the common case of a
# function with a doc string and a single pass statement.
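                    # Hedged example of that common case:
                    #     def stub():
                    #         "docstring"
                    #         pass
                    # find_docstring_pass_pair maps both lines to a single
                    # (first, last) range, so the measurement is insensitive to
                    # which of the two lines the interpreter reports.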
self.find_docstring_pass_pair(tree[i], spots)
elif tree[i][0] == symbol.simple_stmt:
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i])
if first_line != last_line:
self.record_multiline(spots, first_line, last_line)
self.get_suite_spots(tree[i], spots)
def find_executable_statements(self, text, exclude=None):
# Find lines which match an exclusion pattern.
excluded = {}
suite_spots = {}
if exclude:
reExclude = re.compile(exclude)
lines = text.split('\n')
for i in range(len(lines)):
if reExclude.search(lines[i]):
excluded[i+1] = 1
# Parse the code and analyze the parse tree to find out which statements
# are multiline, and where suites begin and end.
import parser
tree = parser.suite(text+'\n\n').totuple(1)
self.get_suite_spots(tree, suite_spots)
#print "Suite spots:", suite_spots
# Use the compiler module to parse the text and find the executable
# statements. We add newlines to be impervious to final partial lines.
statements = {}
ast = compiler.parse(text+'\n\n')
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
compiler.walk(ast, visitor, walker=visitor)
lines = statements.keys()
lines.sort()
excluded_lines = excluded.keys()
excluded_lines.sort()
return lines, excluded_lines, suite_spots
# format_lines(statements, lines). Format a list of line numbers
# for printing by coalescing groups of lines as long as the lines
# represent consecutive statements. This will coalesce even if
# there are gaps between statements, so if statements =
# [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
# format_lines will return "1-2, 5-11, 13-14".
def format_lines(self, statements, lines):
        i = 0
        j = 0
        start = None
        pairs = []
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
                if start is None:
start = lines[j]
end = lines[j]
j = j + 1
elif start:
pairs.append((start, end))
start = None
i = i + 1
if start:
pairs.append((start, end))
def stringify(pair):
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
ret = string.join(map(stringify, pairs), ", ")
return ret
# Backward compatibility with version 1.
def analysis(self, morf):
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
filename, statements, excluded, line_map = self.analyze_morf(morf)
self.canonicalize_filenames()
if not self.cexecuted.has_key(filename):
self.cexecuted[filename] = {}
missing = []
for line in statements:
lines = line_map.get(line, [line, line])
for l in range(lines[0], lines[1]+1):
if self.cexecuted[filename].has_key(l):
break
else:
missing.append(line)
return (filename, statements, excluded, missing,
self.format_lines(statements, missing))
def relative_filename(self, filename):
""" Convert filename to relative filename from self.relative_dir.
"""
return filename.replace(self.relative_dir, "")
def morf_name(self, morf):
""" Return the name of morf as used in report.
"""
if hasattr(morf, '__name__'):
return morf.__name__
else:
return self.relative_filename(os.path.splitext(morf)[0])
def filter_by_prefix(self, morfs, omit_prefixes):
""" Return list of morfs where the morf name does not begin
with any one of the omit_prefixes.
"""
filtered_morfs = []
for morf in morfs:
for prefix in omit_prefixes:
if self.morf_name(morf).startswith(prefix):
break
else:
filtered_morfs.append(morf)
return filtered_morfs
def morf_name_compare(self, x, y):
return cmp(self.morf_name(x), self.morf_name(y))
def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]):
if not isinstance(morfs, types.ListType):
morfs = [morfs]
        # On Windows, the shell doesn't expand wildcards. Do it here.
globbed = []
for morf in morfs:
if isinstance(morf, strclass):
globbed.extend(glob.glob(morf))
else:
globbed.append(morf)
morfs = globbed
morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare)
max_name = max([5,] + map(len, map(self.morf_name, morfs)))
fmt_name = "%%- %ds " % max_name
fmt_err = fmt_name + "%s: %s"
header = fmt_name % "Name" + " Stmts Exec Cover"
fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
if show_missing:
header = header + " Missing"
fmt_coverage = fmt_coverage + " %s"
if not file:
file = sys.stdout
print >>file, header
print >>file, "-" * len(header)
total_statements = 0
total_executed = 0
for morf in morfs:
name = self.morf_name(morf)
try:
_, statements, _, missing, readable = self.analysis2(morf)
n = len(statements)
m = n - len(missing)
if n > 0:
pc = 100.0 * m / n
else:
pc = 100.0
args = (name, n, m, pc)
if show_missing:
args = args + (readable,)
print >>file, fmt_coverage % args
total_statements = total_statements + n
total_executed = total_executed + m
except KeyboardInterrupt: #pragma: no cover
raise
except:
if not ignore_errors:
typ, msg = sys.exc_info()[:2]
print >>file, fmt_err % (name, typ, msg)
if len(morfs) > 1:
print >>file, "-" * len(header)
if total_statements > 0:
pc = 100.0 * total_executed / total_statements
else:
pc = 100.0
args = ("TOTAL", total_statements, total_executed, pc)
if show_missing:
args = args + ("",)
print >>file, fmt_coverage % args
# annotate(morfs, ignore_errors).
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
def annotate(self, morfs, directory=None, ignore_errors=0, omit_prefixes=[]):
morfs = self.filter_by_prefix(morfs, omit_prefixes)
for morf in morfs:
try:
filename, statements, excluded, missing, _ = self.analysis2(morf)
self.annotate_file(filename, statements, excluded, missing, directory)
except KeyboardInterrupt:
raise
except:
if not ignore_errors:
raise
def annotate_file(self, filename, statements, excluded, missing, directory=None):
source = open(filename, 'r')
if directory:
dest_file = os.path.join(directory,
os.path.basename(filename)
+ ',cover')
else:
dest_file = filename + ',cover'
dest = open(dest_file, 'w')
lineno = 0
i = 0
j = 0
covered = 1
while 1:
line = source.readline()
if line == '':
break
lineno = lineno + 1
while i < len(statements) and statements[i] < lineno:
i = i + 1
while j < len(missing) and missing[j] < lineno:
j = j + 1
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
# Special logic for lines containing only 'else:'.
# See [GDR 2001-12-04b, 3.2].
if i >= len(statements) and j >= len(missing):
dest.write('! ')
elif i >= len(statements) or j >= len(missing):
dest.write('> ')
elif statements[i] == missing[j]:
dest.write('! ')
else:
dest.write('> ')
elif lineno in excluded:
dest.write('- ')
elif covered:
dest.write('> ')
else:
dest.write('! ')
dest.write(line)
source.close()
dest.close()
# Singleton object.
the_coverage = coverage()
# Module functions call methods in the singleton object.
def use_cache(*args, **kw):
return the_coverage.use_cache(*args, **kw)
def start(*args, **kw):
return the_coverage.start(*args, **kw)
def stop(*args, **kw):
return the_coverage.stop(*args, **kw)
def erase(*args, **kw):
return the_coverage.erase(*args, **kw)
def begin_recursive(*args, **kw):
return the_coverage.begin_recursive(*args, **kw)
def end_recursive(*args, **kw):
return the_coverage.end_recursive(*args, **kw)
def exclude(*args, **kw):
return the_coverage.exclude(*args, **kw)
def analysis(*args, **kw):
return the_coverage.analysis(*args, **kw)
def analysis2(*args, **kw):
return the_coverage.analysis2(*args, **kw)
def report(*args, **kw):
return the_coverage.report(*args, **kw)
def annotate(*args, **kw):
return the_coverage.annotate(*args, **kw)
def annotate_file(*args, **kw):
return the_coverage.annotate_file(*args, **kw)
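# A hedged sketch of typical programmatic use of these wrappers (the module
# under measurement, "mymodule", is hypothetical; data is written out
# automatically at interpreter exit by the atexit hook registered below):
#
#     import coverage
#     coverage.erase()
#     coverage.start()
#     import mymodule
#     mymodule.run()
#     coverage.stop()
#     coverage.report([mymodule], show_missing=1)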
# Save coverage data when Python exits. (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
# available.)
try:
import atexit
atexit.register(the_coverage.save)
except ImportError:
sys.exitfunc = the_coverage.save
def main():
the_coverage.command_line(sys.argv[1:])
# Command-line interface.
if __name__ == '__main__':
main()
# A. REFERENCES
#
# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
# Ravenbrook Limited; 2001-12-04;
# <http://www.nedbatchelder.com/code/modules/rees-coverage.html>.
#
# [GDR 2001-12-04b] "Statement coverage for Python: design and
# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
# <http://www.nedbatchelder.com/code/modules/rees-design.html>.
#
# [van Rossum 2001-07-20a] "Python Reference Manual (release 2.1.1)";
# Guido van Rossum; 2001-07-20;
# <http://www.python.org/doc/2.1.1/ref/ref.html>.
#
# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
#
#
# B. DOCUMENT HISTORY
#
# 2001-12-04 GDR Created.
#
# 2001-12-06 GDR Added command-line interface and source code
# annotation.
#
# 2001-12-09 GDR Moved design and interface to separate documents.
#
# 2001-12-10 GDR Open cache file as binary on Windows. Allow
# simultaneous -e and -x, or -a and -r.
#
# 2001-12-12 GDR Added command-line help. Cache analysis so that it
# only needs to be done once when you specify -a and -r.
#
# 2001-12-13 GDR Improved speed while recording. Portable between
# Python 1.5.2 and 2.1.1.
#
# 2002-01-03 GDR Module-level functions work correctly.
#
# 2002-01-07 GDR Update sys.path when running a file with the -x option,
# so that it matches the value the program would get if it were run on
# its own.
#
# 2004-12-12 NMB Significant code changes.
# - Finding executable statements has been rewritten so that docstrings and
# other quirks of Python execution aren't mistakenly identified as missing
# lines.
# - Lines can be excluded from consideration, even entire suites of lines.
# - The filesystem cache of covered lines can be disabled programmatically.
# - Modernized the code.
#
# 2004-12-14 NMB Minor tweaks. Return 'analysis' to its original behavior
# and add 'analysis2'. Add a global for 'annotate', and factor it, adding
# 'annotate_file'.
#
# 2004-12-31 NMB Allow for keyword arguments in the module global functions.
# Thanks, Allen.
#
# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
# captured to a different destination.
#
# 2005-12-03 NMB coverage.py can now measure itself.
#
# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames,
# and sorting and omitting files to report on.
#
# 2006-07-23 NMB Applied Joseph Tate's patch for function decorators.
#
# 2006-08-21 NMB Applied Sigve Tjora and Mark van der Wal's fixes for argument
# handling.
#
# 2006-08-22 NMB Applied Geoff Bache's parallel mode patch.
#
# 2006-08-23 NMB Refactorings to improve testability. Fixes to command-line
# logic for parallel mode and collect.
#
# 2006-08-25 NMB "#pragma: nocover" is excluded by default.
#
# 2006-09-10 NMB Properly ignore docstrings and other constant expressions that
# appear in the middle of a function, a problem reported by Tim Leslie.
# Minor changes to avoid lint warnings.
#
# 2006-09-17 NMB coverage.erase() shouldn't clobber the exclude regex.
# Change how parallel mode is invoked, and fix erase() so that it erases the
# cache when called programmatically.
#
# 2007-07-21 NMB In reports, ignore code executed from strings, since we can't
# do anything useful with it anyway.
# Better file handling on Linux, thanks Guillaume Chazarain.
# Better shell support on Windows, thanks Noel O'Boyle.
# Python 2.2 support maintained, thanks Catherine Proulx.
#
# 2007-07-22 NMB Python 2.5 now fully supported. The method of dealing with
# multi-line statements is now less sensitive to the exact line that Python
# reports during execution. Pass statements are handled specially so that their
# disappearance during execution won't throw off the measurement.
#
# 2007-07-23 NMB Now Python 2.5 is *really* fully supported: the body of the
# new with statement is counted as executable.
#
# 2007-07-29 NMB Better packaging.
#
# 2007-09-30 NMB Don't try to predict whether a file is Python source based on
# the extension. Extensionless files are often Python scripts. Instead, simply
# parse the file and catch the syntax errors. Hat tip to Ben Finney.
#
# 2008-05-25 NMB Open files in rU mode to avoid line ending craziness.
# Thanks, Edward Loper.
#
# 2008-09-14 NMB Add support for finding source files in eggs.
# Don't check for morf's being instances of ModuleType, instead use duck typing
# so that pseudo-modules can participate. Thanks, Imri Goldberg.
# Use os.realpath as part of the fixing of filenames so that symlinks won't
# confuse things. Thanks, Patrick Mezard.
#
#
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2008 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# $Id: coverage.py 100 2008-10-12 12:08:22Z nedbat $
|
|
import sys
import deen.logger
class DeenPlugin(object):
"""The core plugin class that should be subclassed
by every deen plugin. It provides some required
class attributes that ease the process of writing
new plugins."""
# In case an error happened, it should
# be stored in this variable.
error = None
# Internal name for the plugin.
name = ''
# The name that will be displayed in the GUI.
display_name = ''
    # A list of aliases for this plugin. Can
    # be empty if there are no aliases for
    # the plugin name.
aliases = []
    # Indicates whether the output is formatted.
formatted = False
def __init__(self):
self.parent = None
self.content = None
self.log = None
self._content = bytearray()
self._create_log_handler()
@property
def content(self):
return self._content
@content.setter
def content(self, data):
if isinstance(data, str):
data = data.encode()
if isinstance(data, bytes):
data = bytearray(data)
self._content = data
def prerequisites(self):
"""A function that should return True if all
prerequisites for this plugin are met or False
if not. Here a plugin can e.g. check if the
current Python version is suitable for the
functionality or if required third party modules
are installed.
:return: True if all prerequisites are met,
False if not.
"""
return True
def process(self, data):
"""Every plugin must have a process method
        that e.g. encodes, compresses, hashes, or
        formats the input data.
:param data: the input data that should be
processed
:return: the processed data
"""
assert data is not None,\
'Input data is None'
assert isinstance(data, (bytes, bytearray)),\
'Invalid input type: ' + str(type(data))
def unprocess(self, data):
"""Depending of the category of a plugin, it
could also have an unprocess function. This
applies to e.g. codecs and compressions.
However, e.g. hash functions will not require
an unprocess function as they are not (easily)
reversible.
:param data: the input data that should be
processed
:return: the processed data
"""
assert data is not None,\
'Input data is None'
assert isinstance(data, (bytes, bytearray)),\
'Invalid input type: ' + str(type(data))
@staticmethod
def add_argparser(argparser, plugin_class, revert=False):
"""This function allows plugins to add subcommands
to argparse in order to be used via a seperate
command/alias on the CLI.
:param argparser: a ArgParser object
:param cmd_name: a plugin's cmd_name class variable
:param cmd_help: a plugin's cmd_help class variable
:param cmd_aliases: a plugin's cmd_aliases class variable
:param revert: True will add the -r/--revert argument
:return: the newly created argparse object
"""
cmd_name = plugin_class.cmd_name
cmd_help = plugin_class.cmd_help
cmd_aliases = plugin_class.aliases
revert = True if 'unprocess' in vars(plugin_class) else False
if not cmd_aliases:
cmd_aliases = []
# Add convenience wrappers for reverting plugins
if revert:
_cmd_aliases = []
_cmd_aliases.extend(cmd_aliases)
for alias in _cmd_aliases:
cmd_aliases.append('.' + alias)
cmd_aliases.insert(0, '.' + cmd_name)
        # Note: argparse only supports subcommand aliases since Python 3.2.
if sys.version_info.major < 3 or \
(sys.version_info.major == 3 and
sys.version_info.minor < 2):
parser = argparser.add_parser(cmd_name, help=cmd_help, description=cmd_help)
else:
parser = argparser.add_parser(cmd_name, help=cmd_help, aliases=cmd_aliases,
description=cmd_help)
parser.add_argument('plugindata', action='store',
help='input data', nargs='?')
parser.add_argument('-f', '--file', dest='plugininfile', default=None,
help='file name or - for STDIN', metavar='filename')
if revert:
parser.add_argument('-r', '--revert', action='store_true', dest='revert',
default=False, help='revert plugin process')
def process_cli(self, args):
"""Do whatever the CLI cmd should do. The args
argument is the return of parse_args(). Must
return the processed data.
:param args: the output of argparse.parse_args()
:return: the return of either process() or unprocess()
"""
if not self.content:
if not args.plugindata:
if not args.plugininfile:
self.content = self.read_content_from_file('-')
else:
self.content = self.read_content_from_file(args.plugininfile)
else:
self.content = args.plugindata
if not self.content:
return
if not args.revert:
return self.process(self.content)
else:
return self.unprocess(self.content)
def process_gui(self, parent, content):
"""Plugins that need additional GUI elements
i.e. to accept multiple inputs, they can
override this function. The parent argument
can be used to add widgets to the main window.
:param parent: the parent object
:param content: the input data that will be processed
:return: the return value of process()
"""
self.parent = parent
def unprocess_gui(self, parent, content):
"""Plugins that need additional GUI elements
i.e. to accept multiple inputs, they can
override this function. The parent argument
can be used to add widgets to the main window.
:param parent: the parent object
:param content: the input data that will be processed
:return: the return value of unprocess()
"""
self.parent = parent
self.content = content
def read_content_from_file(self, file):
"""If file is a filename, it will read and
return it's content. If file is '-', read
from STDIN instead of a file.
:param file: filename of '-' for STDIN
:return: content of filename or data from STDIN
"""
content = b''
try:
if file == '-':
try:
stdin = sys.stdin.buffer
except AttributeError:
stdin = sys.stdin
content = stdin.read()
else:
try:
with open(file, 'rb') as f:
content = f.read()
except Exception as e:
self.error = e
except KeyboardInterrupt:
return
return content
def write_to_stdout(self, data, nonewline=False):
"""Write processed data to STDOUT. It takes
        care of whether it is running in Python 2 or 3
to properly write bytes to STDOUT.
:param data: data to be written to STDOUT
:param nonewline: if True, omit newline at the end
"""
try:
# Python 3
stdout = sys.stdout.buffer
except AttributeError:
# Python 2
stdout = sys.stdout
stdout.write(data)
if not nonewline:
stdout.write(b'\n')
def _create_log_handler(self):
"""Create a log handler for each plugin instance.
Plugins are supposed to log via self.log, i.e.
self.log.info()."""
logger = 'plugins.' + self.__class__.__name__
self.log = deen.logger.DEEN_LOG.getChild(logger)
def log_missing_depdendencies(self, dep):
"""A helper function for plugins
to log missing dependencies in the
self.prerequisites() function.
:param dep: a str or list of module names"""
if isinstance(dep, list):
dep = ','.join(dep)
msg = dep
msg += ' modules '
else:
msg = dep
msg += ' module '
msg += 'not found, '
msg += self.display_name
msg += ' plugin disabled.'
self.log.debug(msg)
def log_incompatible_version(self, version=''):
"""A helper function for plugins to log
missing features in current Python version.
:param version: a str with a Python version (optional)"""
msg = 'Python version ' + str(sys.version_info.major)
msg += '.' + str(sys.version_info.minor)
msg += ' does not support ' + self.display_name
if version:
msg += ' (v' + version
msg += ' required)'
self.log.debug(msg)
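# Illustrative sketch (not part of deen itself): a minimal plugin subclass.
# The cmd_name/cmd_help class attributes are assumptions based on how
# add_argparser() above reads them from the plugin class; everything else
# only uses attributes and methods defined by DeenPlugin.
class ExampleReversePlugin(DeenPlugin):
    name = 'example_reverse'
    display_name = 'Reverse (example)'
    aliases = ['rev']
    cmd_name = 'example_reverse'
    cmd_help = 'Reverse the input data'

    def process(self, data):
        # Run the base class sanity checks, then reverse the bytes.
        super(ExampleReversePlugin, self).process(data)
        return bytearray(reversed(data))

    def unprocess(self, data):
        # Reversing is its own inverse.
        super(ExampleReversePlugin, self).unprocess(data)
        return bytearray(reversed(data))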
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating batch predictions
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
from . import create_ensemble_steps as ensemble_create
from . import create_cluster_steps as cluster_create
from . import create_anomaly_steps as anomaly_create
from . import create_batch_prediction_steps as batch_pred_create
from . import create_prediction_steps as prediction_create
class TestBatchPrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a batch prediction:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
When I create a batch prediction for the dataset with the model
And I wait until the batch prediction is ready less than <time_4> secs
And I download the created predictions file to "<local_file>"
Then the batch prediction file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | local_file | predictions_file |
| ../data/iris.csv | 30 | 30 | 50 | 50 | ./tmp/batch_predictions.csv |./data/batch_predictions.csv |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '30', '30', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_batch_prediction(self)
batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
batch_pred_create.i_download_predictions_file(self, example[5])
batch_pred_create.i_check_predictions(self, example[6])
def test_scenario2(self):
"""
Scenario: Successfully creating a batch prediction for an ensemble:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an ensemble of <number_of_models> models and <tlp> tlp
And I wait until the ensemble is ready less than <time_3> secs
When I create a batch prediction for the dataset with the ensemble and "<params>"
And I wait until the batch prediction is ready less than <time_4> secs
And I download the created predictions file to "<local_file>"
Then the batch prediction file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | number_of_models | tlp | time_3 | time_4 | local_file | predictions_file | params
| ../data/iris.csv | 30 | 30 | 5 | 1 | 80 | 50 | ./tmp/batch_predictions.csv | ./data/batch_predictions_e.csv | {"combiner": 0}
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_c0.csv', {"combiner":0}],
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_c1.csv', {"combiner":1, "confidence": True}],
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_c2.csv', {"combiner":2, "confidence": True}],
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_o_k_v.csv', {"operating_kind": "votes", "confidence": True}],
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_o_k_p.csv', {"operating_kind": "probability", "probability": True}],
['data/iris.csv', '30', '30', '5', '1', '180', '150', 'tmp/batch_predictions.csv', 'data/batch_predictions_e_o_k_c.csv', {"operating_kind": "confidence", "confidence": True}]]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
ensemble_create.i_create_an_ensemble(self, example[3], example[4])
ensemble_create.the_ensemble_is_finished_in_less_than(self, example[5])
batch_pred_create.i_create_a_batch_prediction_ensemble(self, example[9])
batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[6])
batch_pred_create.i_download_predictions_file(self, example[7])
batch_pred_create.i_check_predictions(self, example[8])
def test_scenario3(self):
"""
Scenario: Successfully creating a batch centroid from a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster
And I wait until the cluster is ready less than <time_3> secs
When I create a batch centroid for the dataset
And I check the batch centroid is ok
And I wait until the batch centroid is ready less than <time_4> secs
And I download the created centroid file to "<local_file>"
Then the batch centroid file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | local_file | predictions_file |
| ../data/diabetes.csv | 50 | 50 | 50 | 50 | ./tmp/batch_predictions.csv |./data/batch_predictions_c.csv |
"""
print(self.test_scenario3.__doc__)
examples = [
['data/diabetes.csv', '50', '50', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_c.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
cluster_create.i_create_a_cluster(self)
cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_batch_prediction_with_cluster(self)
batch_pred_create.the_batch_centroid_is_finished_in_less_than(self, example[4])
batch_pred_create.i_download_centroid_file(self, example[5])
batch_pred_create.i_check_predictions(self, example[6])
def test_scenario4(self):
"""
Scenario: Successfully creating a source from a batch prediction:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
When I create a batch prediction for the dataset with the model
And I wait until the batch prediction is ready less than <time_4> secs
Then I create a source from the batch prediction
And I wait until the source is ready less than <time_1> secs
Examples:
| data | time_1 | time_2 | time_3 | time_4 |
| ../data/iris.csv | 30 | 30 | 50 | 50 |
"""
print(self.test_scenario4.__doc__)
examples = [
['data/diabetes.csv', '30', '30', '50', '50']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_batch_prediction(self)
batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
batch_pred_create.i_create_a_source_from_batch_prediction(self)
source_create.the_source_is_finished(self, example[1])
def test_scenario5(self):
"""
Scenario: Successfully creating a batch anomaly score from an anomaly detector:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less than <time_3> secs
When I create a batch anomaly score
And I check the batch anomaly score is ok
And I wait until the batch anomaly score is ready less than <time_4> secs
And I download the created anomaly score file to "<local_file>"
Then the batch anomaly score file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | local_file | predictions_file |
| ../data/tiny_kdd.csv | 30 | 30 | 50 | 50 | ./tmp/batch_predictions.csv |./data/batch_predictions_a.csv |
"""
print(self.test_scenario5.__doc__)
examples = [
['data/tiny_kdd.csv', '30', '30', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_a.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
anomaly_create.i_create_an_anomaly(self)
anomaly_create.the_anomaly_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_batch_prediction_with_anomaly(self)
batch_pred_create.the_batch_anomaly_score_is_finished_in_less_than(self, example[4])
batch_pred_create.i_download_anomaly_score_file(self, example[5])
batch_pred_create.i_check_predictions(self, example[6])
def test_scenario6(self):
"""
Scenario: Successfully creating a batch prediction for a logistic regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a logistic regression
And I wait until the logistic regression is ready less than <time_3> secs
When I create a batch prediction for the dataset with the logistic regression
And I wait until the batch prediction is ready less than <time_4> secs
And I download the created predictions file to "<local_file>"
Then the batch prediction file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | local_file | predictions_file |
| ../data/iris.csv | 30 | 30 | 80 | 50 | ./tmp/batch_predictions.csv | ./data/batch_predictions_lr.csv |
"""
print(self.test_scenario6.__doc__)
examples = [
['data/iris.csv', '30', '30', '80', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_lr.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_logistic_model(self)
model_create.the_logistic_model_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_batch_prediction_logistic_model(self)
batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
batch_pred_create.i_download_predictions_file(self, example[5])
batch_pred_create.i_check_predictions(self, example[6])
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet50 model."""
import string
import edward2 as ed
import tensorflow as tf
# Use batch normalization defaults from Pytorch.
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def bottleneck_block(inputs,
filters,
stage,
block,
strides):
"""Residual block with 1x1 -> 3x3 -> 1x1 convs in main path.
Note that strides appear in the second conv (3x3) rather than the first (1x1).
This is also known as "ResNet v1.5" as it differs from He et al. (2015)
(http://torch.ch/blog/2016/02/04/resnets.html).
Args:
inputs: tf.Tensor.
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
Returns:
tf.Tensor.
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
filters1,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(inputs)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size=3,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(x)
shortcut = inputs
if not x.shape.is_compatible_with(shortcut.shape):
shortcut = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(shortcut)
shortcut = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(shortcut)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
def group(inputs, filters, num_blocks, stage, strides):
blocks = string.ascii_lowercase
x = bottleneck_block(inputs, filters, stage, block=blocks[0], strides=strides)
for i in range(num_blocks - 1):
x = bottleneck_block(x, filters, stage, block=blocks[i + 1], strides=1)
return x
def resnet50_het_mimo(
input_shape,
num_classes,
ensemble_size,
num_factors,
temperature,
share_het_layer,
num_mc_samples=10000,
eps=1e-5,
width_multiplier=1):
"""Builds a multiheaded ResNet50 with an heteroscedastic layer.
Using strided conv, pooling, four groups of residual blocks, and pooling, the
network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->
14x14 -> 7x7 (Table 1 of He et al. (2015)).
Args:
input_shape: Shape tuple of input excluding batch dimension.
num_classes: Number of output classes.
ensemble_size: Number of ensembles i.e. number of heads and inputs.
num_factors: Integer. Number of factors to use in approximation to full
rank covariance matrix. If num_factors <= 0, then the diagonal covariance
method MCSoftmaxDense is used.
temperature: Float or scalar `Tensor` representing the softmax
temperature.
    share_het_layer: Boolean. Whether to use a single heteroscedastic
      layer with output size (num_classes * ensemble_size), or to create
      ensemble_size separate heteroscedastic layers, each with num_classes
      outputs.
num_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution.
eps: Float. Clip probabilities into [eps, 1.0] softmax or
[eps, 1.0 - eps] sigmoid before applying log (softmax), or inverse
sigmoid.
width_multiplier: Multiply the number of filters for wide ResNet.
Returns:
tf.keras.Model.
"""
input_shape = list(input_shape)
inputs = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.Permute([2, 3, 4, 1])(inputs)
assert ensemble_size == input_shape[0]
x = tf.keras.layers.Reshape(list(input_shape[1:-1]) +
[input_shape[-1] * ensemble_size])(
x)
x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(x)
x = tf.keras.layers.Conv2D(
width_multiplier * 64,
kernel_size=7,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
name='conv1')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, padding='same')(x)
x = group(x, [width_multiplier * 64,
width_multiplier * 64,
width_multiplier * 256], stage=2, num_blocks=3, strides=1)
x = group(x, [width_multiplier * 128,
width_multiplier * 128,
width_multiplier * 512], stage=3, num_blocks=4, strides=2)
x = group(x, [width_multiplier * 256,
width_multiplier * 256,
width_multiplier * 1024], stage=4, num_blocks=6, strides=2)
x = group(x, [width_multiplier * 512,
width_multiplier * 512,
width_multiplier * 2048], stage=5, num_blocks=3, strides=2)
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
assert num_factors > 0
het_layer_args = {'temperature': temperature,
'train_mc_samples': num_mc_samples,
'test_mc_samples': num_mc_samples,
'share_samples_across_batch': True,
'num_classes': num_classes, 'num_factors': num_factors,
'logits_only': True, 'eps': eps,
'dtype': tf.float32, 'name': 'fc1000'}
if share_het_layer:
het_layer_args.update({'ensemble_size': ensemble_size})
output_layer = ed.layers.MultiHeadMCSoftmaxDenseFA(**het_layer_args)
x = output_layer(x)
else:
output_het = []
for i in range(ensemble_size):
het_layer_args.update({'name': 'ensemble_' + str(i) + '_fc1000'})
output_layer = ed.layers.MCSoftmaxDenseFA(**het_layer_args)
output_het.append(output_layer(x))
x = tf.stack(output_het, axis=1)
return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50')
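# Illustrative sketch (assumes TensorFlow and edward2 are installed): build a
# small two-member MIMO ResNet-50 with per-member heteroscedastic heads. Each
# example is shaped (ensemble_size, height, width, channels) to match the
# Permute/Reshape logic above; the values below are arbitrary.
if __name__ == '__main__':
  model = resnet50_het_mimo(
      input_shape=(2, 224, 224, 3),
      num_classes=10,
      ensemble_size=2,
      num_factors=5,
      temperature=1.0,
      share_het_layer=False,
      num_mc_samples=100)
  model.summary()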
|
|
#!/usr/bin/env python
# Needed for antipackage with python 2
from __future__ import absolute_import
import datetime
import fnmatch
import glob
import io
import json
import os
import os.path
import random
import re
import socket
import string
import subprocess
import sys
import yaml
from collections import Counter, OrderedDict
from os.path import expandvars
REPO_ROOT = ''
BIN_MATRIX = None
BUCKET_MATRIX = None
ENV = os.getenv('APPSCODE_ENV', 'dev').lower()
def _goenv():
env = {}
for line in subprocess.check_output(['go', 'env']).split('\n'):
line = line.strip()
if len(line) == 0:
continue
k, v = line.split('=', 1)
v = v.strip('"')
if len(v) > 0:
env[k] = v
return env
GOENV = _goenv()
GOPATH = GOENV["GOPATH"]
GOBIN = GOENV["GOPATH"] + '/bin'
GOHOSTOS = GOENV["GOHOSTOS"]
GOHOSTARCH = GOENV["GOHOSTARCH"]
GOC = 'go'
def metadata(cwd, goos='', goarch=''):
md = {
'commit_hash': subprocess.check_output('git rev-parse --verify HEAD', shell=True, cwd=cwd).strip(),
'git_branch': subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True, cwd=cwd).strip(),
# http://stackoverflow.com/a/1404862/3476121
'git_tag': subprocess.check_output('git describe --exact-match --abbrev=0 2>/dev/null || echo ""', shell=True,
cwd=cwd).strip(),
'commit_timestamp': datetime.datetime.utcfromtimestamp(
int(subprocess.check_output('git show -s --format=%ct', shell=True, cwd=cwd).strip())).isoformat(),
'build_timestamp': datetime.datetime.utcnow().isoformat(),
'build_host': socket.gethostname(),
'build_host_os': GOENV["GOHOSTOS"],
'build_host_arch': GOENV["GOHOSTARCH"]
}
if md['git_tag']:
md['version'] = md['git_tag']
md['version_strategy'] = 'tag'
elif not md['git_branch'] in ['master', 'HEAD'] and not md['git_branch'].startswith('release-'):
md['version'] = md['git_branch']
md['version_strategy'] = 'branch'
else:
hash_ver = subprocess.check_output('git describe --tags --always --dirty', shell=True, cwd=cwd).strip()
md['version'] = hash_ver
md['version_strategy'] = 'commit_hash'
if goos:
md['os'] = goos
if goarch:
md['arch'] = goarch
return md
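# Illustrative sketch: metadata() returns a dict roughly like the following
# (all values here are made up); go_build() below turns these entries into
# -X linker flags via to_upper_camel().
#
#   {
#     'commit_hash': 'abc123',
#     'git_branch': 'master',
#     'git_tag': 'v0.1.0',
#     'commit_timestamp': '2018-10-01T12:34:56',
#     'build_timestamp': '2018-10-01T12:40:00',
#     'build_host': 'builder-01',
#     'build_host_os': 'linux',
#     'build_host_arch': 'amd64',
#     'version': 'v0.1.0',
#     'version_strategy': 'tag',
#   }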
def read_file(name):
    with open(name, 'r') as f:
        return f.read()
def write_file(name, content):
dir = os.path.dirname(name)
if not os.path.exists(dir):
os.makedirs(dir)
with open(name, 'w') as f:
return f.write(content)
def append_file(name, content):
with open(name, 'a') as f:
return f.write(content)
def write_checksum(folder, file):
cmd = "openssl md5 {0} | sed 's/^.* //' > {0}.md5".format(file)
subprocess.call(cmd, shell=True, cwd=folder)
cmd = "openssl sha1 {0} | sed 's/^.* //' > {0}.sha1".format(file)
subprocess.call(cmd, shell=True, cwd=folder)
# TODO: use unicode encoding
def read_json(name):
try:
with open(name, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError:
return {}
def write_json(obj, name):
with io.open(name, 'w', encoding='utf-8') as f:
f.write(unicode(json.dumps(obj, indent=2, separators=(',', ': '), ensure_ascii=False)))
def call(cmd, stdin=None, cwd=None):
print(cmd)
return subprocess.call([expandvars(cmd)], shell=True, stdin=stdin, cwd=cwd)
def die(status):
if status:
sys.exit(status)
def check_output(cmd, stdin=None, cwd=None):
print(cmd)
return subprocess.check_output([expandvars(cmd)], shell=True, stdin=stdin, cwd=cwd)
def deps():
die(call('go get -u golang.org/x/tools/cmd/goimports'))
die(call('go get -u golang.org/x/tools/cmd/stringer'))
die(call('go get -u github.com/Masterminds/glide'))
die(call('go get -u github.com/sgotti/glide-vc'))
die(call('go get -u github.com/jteeuwen/go-bindata/...'))
die(call('go get -u github.com/progrium/go-extpoints'))
die(call('go get -u github.com/tools/godep'))
die(call('go get -u github.com/uber/go-torch'))
def to_upper_camel(lower_snake):
components = lower_snake.split('_')
# We capitalize the first letter of each component
# with the 'title' method and join them together.
return ''.join(x.title() for x in components[:])
# ref: https://golang.org/cmd/go/
def go_build(name, goos, goarch, main, compress=False, upx=False):
linker_opts = []
if BIN_MATRIX[name].get('go_version', False):
md = metadata(REPO_ROOT, goos, goarch)
if md['version_strategy'] == 'tag':
del md['build_timestamp']
del md['build_host']
del md['build_host_os']
del md['build_host_arch']
for k, v in md.items():
linker_opts.append('-X')
linker_opts.append('main.' + to_upper_camel(k) + '=' + v)
cgo_env = 'CGO_ENABLED=0'
cgo = ''
if BIN_MATRIX[name].get('use_cgo', False):
cgo_env = "CGO_ENABLED=1"
cgo = "-a -installsuffix cgo"
linker_opts.append('-linkmode external -extldflags -static -w')
ldflags = ''
if linker_opts:
ldflags = "-ldflags '{}'".format(' '.join(linker_opts))
tags = "-tags 'osusergo netgo static_build'"
bindir = 'dist/{name}'.format(name=name)
if not os.path.isdir(bindir):
os.makedirs(bindir)
if goos == 'alpine':
repo_dir = REPO_ROOT[len(GOPATH):]
uid = check_output('id -u').strip()
cmd = "docker run --rm -u {uid} -v /tmp:/.cache -v {repo_root}:/go{repo_dir} -w /go{repo_dir} -e {cgo_env} golang:1.11-alpine {goc} build -o {bindir}/{name}-{goos}-{goarch}{ext} {cgo} {ldflags} {tags} {main}".format(
repo_root=REPO_ROOT,
repo_dir=repo_dir,
uid=uid,
name=name,
goc=GOC,
goos=goos,
goarch=goarch,
bindir=bindir,
cgo_env=cgo_env,
cgo=cgo,
ldflags=ldflags,
tags=tags,
ext='.exe' if goos == 'windows' else '',
main=main
)
else:
cmd = "GOOS={goos} GOARCH={goarch} {cgo_env} {goc} build -o {bindir}/{name}-{goos}-{goarch}{ext} {cgo} {ldflags} {tags} {main}".format(
name=name,
goc=GOC,
goos=goos,
goarch=goarch,
bindir=bindir,
cgo_env=cgo_env,
cgo=cgo,
ldflags=ldflags,
tags=tags,
ext='.exe' if goos == 'windows' else '',
main=main
)
die(call(cmd, cwd=REPO_ROOT))
if upx and (goos in ['linux', 'darwin']) and (goarch in ['amd64', '386']):
cmd = "upx --brute {name}-{goos}-{goarch}{ext}".format(
name=name,
goos=goos,
goarch=goarch,
bindir=bindir,
ext='.exe' if goos == 'windows' else ''
)
die(call(cmd, cwd=REPO_ROOT + '/' + bindir))
if compress:
if goos in ['windows']:
cmd = "zip {name}-{goos}-{goarch}.zip {name}-{goos}-{goarch}{ext}"
else:
cmd = "bzip2 --keep -vf {name}-{goos}-{goarch}{ext}"
cmd = cmd.format(
name=name,
goos=goos,
goarch=goarch,
ext='.exe' if goos == 'windows' else ''
)
die(call(cmd, cwd=REPO_ROOT + '/' + bindir))
print('')
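# Illustrative sketch: for a non-alpine linux/amd64 build of a hypothetical
# binary "foo" whose main package is "cmd/foo", the command assembled above
# looks roughly like:
#
#   GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build \
#     -o dist/foo/foo-linux-amd64 \
#     -ldflags '-X main.Version=... -X main.CommitHash=...' \
#     -tags 'osusergo netgo static_build' cmd/foo
#
# The -X flags come from metadata() via to_upper_camel(), embedding version
# information into the binary at link time.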
def upload_to_cloud(folder, f, version):
write_checksum(folder, f)
name = os.path.basename(folder)
if name not in BIN_MATRIX:
return
if ENV == 'prod' and not BIN_MATRIX[name].get('release', False):
return
buckets = BUCKET_MATRIX.get(ENV, BUCKET_MATRIX['dev'])
if not isinstance(buckets, dict):
buckets = {buckets: ''}
for bucket, region in buckets.items():
dst = "{bucket}/binaries/{name}/{version}/{file}".format(
bucket=bucket,
name=name,
version=version,
file=f
)
if bucket.startswith('gs://'):
upload_to_gcs(folder, f, dst, BIN_MATRIX[name].get('release', False))
elif bucket.startswith('s3://'):
upload_to_s3(folder, f, dst, region, BIN_MATRIX[name].get('release', False))
def upload_to_gcs(folder, src, dst, public):
call("gsutil cp {0} {1}".format(src, dst), cwd=folder)
call("gsutil cp {0}.md5 {1}.md5".format(src, dst), cwd=folder)
call("gsutil cp {0}.sha1 {1}.sha1".format(src, dst), cwd=folder)
if public:
call("gsutil acl ch -u AllUsers:R {0}".format(dst), cwd=folder)
call("gsutil acl ch -u AllUsers:R {0}.md5".format(dst), cwd=folder)
call("gsutil acl ch -u AllUsers:R {0}.sha1".format(dst), cwd=folder)
def upload_to_s3(folder, src, dst, region, public):
opt_region = ''
if region:
opt_region = '--region ' + region
opt_public = ''
if public:
opt_public = "--acl public-read"
call("aws s3 cp {2} {3} {0} {1}".format(src, dst, opt_region, opt_public), cwd=folder)
call("aws s3 cp {2} {3} {0}.md5 {1}.md5".format(src, dst, opt_region, opt_public), cwd=folder)
call("aws s3 cp {2} {3} {0}.sha1 {1}.sha1".format(src, dst, opt_region, opt_public), cwd=folder)
def update_registry(version):
dist = REPO_ROOT + '/dist'
bucket = BUCKET_MATRIX.get(ENV, BUCKET_MATRIX['dev'])
lf = dist + '/latest.txt'
write_file(lf, version)
for name in os.listdir(dist):
if os.path.isfile(dist + '/' + name):
continue
if name not in BIN_MATRIX:
continue
call("gsutil cp {2} {0}/binaries/{1}/latest.txt".format(bucket, name, lf), cwd=REPO_ROOT)
if BIN_MATRIX[name].get('release', False):
call('gsutil acl ch -u AllUsers:R -r {0}/binaries/{1}/latest.txt'.format(bucket, name), cwd=REPO_ROOT)
def ungroup_go_imports(*paths):
for p in paths:
if os.path.isfile(p):
print('Ungrouping imports of file: ' + p)
_ungroup_go_imports(p)
elif os.path.isdir(p):
print('Ungrouping imports of dir: ' + p)
for dir, _, files in os.walk(p):
for f in fnmatch.filter(files, '*.go'):
_ungroup_go_imports(dir + '/' + f)
else:
for f in glob.glob(p):
print('Ungrouping imports of file: ' + f)
_ungroup_go_imports(f)
BEGIN_IMPORT_REGEX = ur'import \(\s*'
END_IMPORT_REGEX = ur'\)\s*'
def _ungroup_go_imports(fname):
with open(fname, 'r+') as f:
content = f.readlines()
out = []
import_block = False
for line in content:
c = line.strip()
if import_block:
if c == '':
continue
elif re.match(END_IMPORT_REGEX, c) is not None:
import_block = False
elif re.match(BEGIN_IMPORT_REGEX, c) is not None:
import_block = True
out.append(line)
f.seek(0)
f.writelines(out)
f.truncate()
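# Illustrative sketch: _ungroup_go_imports() only removes blank lines inside
# an "import ( ... )" block, so a grouped block such as
#
#   import (
#       "fmt"
#
#       "k8s.io/client-go/kubernetes"
#   )
#
# is rewritten without the separating blank line:
#
#   import (
#       "fmt"
#       "k8s.io/client-go/kubernetes"
#   )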
def git_branch_exists(branch):
return call('git show-ref --quiet refs/heads/{0}'.format(branch), cwd=REPO_ROOT) == 0
def git_checkout(branch):
call('git fetch --all --prune', cwd=REPO_ROOT)
call('git fetch --tags', cwd=REPO_ROOT)
if git_branch_exists(branch):
call('git checkout {0}'.format(branch), cwd=REPO_ROOT)
else:
call('git checkout -b {0}'.format(branch), cwd=REPO_ROOT)
def git_requires_commit():
changed_files = check_output('git diff --name-only', cwd=REPO_ROOT).strip().split('\n')
return Counter(changed_files) != Counter(['glide.lock'])
def glide_mod(glide_config):
    for x in REQUIRED_DEPS:
        found = False
        for idx, dep in enumerate(glide_config['import']):
            if dep['package'] == x['package']:
                glide_config['import'][idx] = x
                found = True
                break
        if not found:
            glide_config['import'].append(x)
for x in DEP_LIST:
for idx, dep in enumerate(glide_config['import']):
if dep['package'] == x['package']:
glide_config['import'][idx] = x
break
def glide_write(f, glide_config):
f.seek(0)
pkg = glide_config.pop('package')
out = 'package: ' + pkg + '\n' + yaml.dump(glide_config, default_flow_style=False)
f.write(out)
f.truncate()
glide_config['package'] = pkg
REQUIRED_DEPS = [
{
"package": "github.com/cpuguy83/go-md2man",
"version": "v1.0.8"
},
{
"package": "github.com/russross/blackfriday",
"version": "v2.0.1"
},
{
"package": "github.com/json-iterator/go",
"version": "1.1.5"
},
{
"package": "github.com/spf13/cobra",
"version": "v0.0.3"
},
{
"package": "github.com/spf13/pflag",
"version": "v1.0.3"
},
{
"package": "golang.org/x/text",
"version": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"package": "golang.org/x/net",
"version": "1c05540f6879653db88113bc4a2b70aec4bd491f"
},
{
"package": "golang.org/x/sys",
"version": "95c6576299259db960f6c5b9b69ea52422860fce"
},
{
"package": "golang.org/x/crypto",
"version": "de0752318171da717af4ce24d0a2e8626afaeb11"
},
{
"package": "github.com/golang/protobuf",
"version": "v1.1.0"
},
{
"package": "github.com/davecgh/go-spew",
"version": "v1.1.1"
},
{
"package": "k8s.io/kube-openapi",
"version": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"
},
{
"package": "gopkg.in/yaml.v2",
"version": "v2.2.1"
}
]
DEP_LIST = [
{
"package": "github.com/cpuguy83/go-md2man",
"version": "v1.0.8"
},
{
"package": "github.com/json-iterator/go",
"version": "1.1.5"
},
{
"package": "github.com/prometheus-operator/prometheus-operator",
"repo": "https://github.com/kmodules/prometheus-operator.git",
"vcs": "git",
"version": "k-1.12"
},
{
"package": "k8s.io/api",
"version": "kubernetes-1.12.0"
},
{
"package": "k8s.io/apiextensions-apiserver",
"version": "kubernetes-1.12.0"
},
{
"package": "k8s.io/apimachinery",
"repo": "https://github.com/kmodules/apimachinery.git",
"vcs": "git",
"version": "ac-1.12.0"
},
{
"package": "k8s.io/apiserver",
"repo": "https://github.com/kmodules/apiserver.git",
"vcs": "git",
"version": "ac-1.12.0"
},
{
"package": "k8s.io/client-go",
"version": "v9.0.0"
},
{
"package": "k8s.io/cli-runtime",
"version": "kubernetes-1.12.0"
},
{
"package": "k8s.io/kubernetes",
"version": "v1.12.0"
},
{
"package": "k8s.io/kube-aggregator",
"version": "kubernetes-1.12.0"
},
{
"package": "k8s.io/metrics",
"version": "kubernetes-1.12.0"
},
{
"package": "github.com/appscode/kutil",
"version": "release-9.0"
},
{
"package": "github.com/appscode/kubernetes-webhook-util",
"version": "release-9.0"
},
{
"package": "kmodules.xyz/custom-resources",
"repo": "https://github.com/kmodules/custom-resources.git",
"vcs": "git",
"version": "release-9.0"
},
{
"package": "kmodules.xyz/monitoring-agent-api",
"repo": "https://github.com/kmodules/monitoring-agent-api.git",
"vcs": "git",
"version": "release-9.0"
},
{
"package": "kmodules.xyz/objectstore-api",
"repo": "https://github.com/kmodules/objectstore-api.git",
"vcs": "git",
"version": "release-9.0"
},
{
"package": "kmodules.xyz/offshoot-api",
"repo": "https://github.com/kmodules/offshoot-api.git",
"vcs": "git",
"version": "release-9.0"
},
{
"package": "kmodules.xyz/openshift",
"repo": "https://github.com/kmodules/openshift.git",
"vcs": "git",
"version": "release-9.0"
},
{
"package": "gomodules.xyz/stow",
"repo": "https://github.com/appscode/stow.git",
"vcs": "git",
"version": "master"
},
{
"package": "github.com/Azure/azure-sdk-for-go",
"version": "v19.0.0"
},
{
"package": "github.com/Azure/go-autorest",
"version": "v10.14.0"
},
{
"package": "github.com/aws/aws-sdk-go",
"version": "v1.14.12"
},
{
"package": "google.golang.org/api/storage/v1",
"version": "3639d6d93f377f39a1de765fa4ef37b3c7ca8bd9"
},
{
"package": "cloud.google.com/go",
"version": "v0.2.0"
},
{
"package": "github.com/spf13/afero",
"version": "v1.1.2"
},
{
"package": "github.com/appscode/osm",
"version": "0.9.0"
},
{
"package": "github.com/kubepack/onessl",
"version": "0.9.0"
}
]
def revendor():
seed = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
revendor_branch = 'k-1.12.0'
print(REPO_ROOT)
call('git reset HEAD --hard', cwd=REPO_ROOT)
call('git clean -xfd', cwd=REPO_ROOT)
git_checkout('master')
call('git pull --rebase origin master', cwd=REPO_ROOT)
git_checkout(revendor_branch)
# https://stackoverflow.com/a/6759339/244009
call("find " + REPO_ROOT + "/apis -type f -exec sed -i -e 's/k8s.io\\/apimachinery\\/pkg\\/api\\/testing\\/roundtrip/k8s.io\\/apimachinery\\/pkg\\/api\\/apitesting\\/roundtrip/g' {} \;", eoe=False)
with open(REPO_ROOT + '/glide.yaml', 'r+') as glide_file:
glide_config = yaml.load(glide_file)
glide_mod(glide_config)
glide_write(glide_file, glide_config)
call('glide slow', cwd=REPO_ROOT)
if git_requires_commit():
call('git add --all', cwd=REPO_ROOT)
call('git commit -s -a -m "Update kubernetes client libraries to 1.12.0"', cwd=REPO_ROOT)
call('git push origin {0}'.format(revendor_branch), cwd=REPO_ROOT)
else:
call('git reset HEAD --hard', cwd=REPO_ROOT)
|
|
""" ietf_netconf_acm
NETCONF Access Control Model.
Copyright (c) 2012 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD
License set forth in Section 4.c of the IETF Trust's
Legal Provisions Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC 6536; see
the RFC itself for full legal notices.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ActionTypeEnum(Enum):
"""
ActionTypeEnum
Action taken by the server when a particular
rule matches.
.. data:: PERMIT = 0
Requested action is permitted.
.. data:: DENY = 1
Requested action is denied.
"""
PERMIT = 0
DENY = 1
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['ActionTypeEnum']
class AccessOperationsType_Bits(FixedBitsDict):
"""
AccessOperationsType_Bits
NETCONF Access Operation.
    Keys are: read, create, exec, update, delete
"""
def __init__(self):
self._dictionary = {
'read':False,
'create':False,
'exec':False,
'update':False,
'delete':False,
}
self._pos_map = {
}
class Nacm(object):
"""
Parameters for NETCONF Access Control Model.
.. attribute:: denied_data_writes
Number of times since the server last restarted that a protocol operation request to alter a configuration datastore was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: denied_notifications
Number of times since the server last restarted that a notification was dropped for a subscription because access to the event type was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: denied_operations
Number of times since the server last restarted that a protocol operation request was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: enable_external_groups
Controls whether the server uses the groups reported by the NETCONF transport layer when it assigns the user to a set of NACM groups. If this leaf has the value 'false', any group names reported by the transport layer are ignored by the server
**type**\: bool
.. attribute:: enable_nacm
Enables or disables all NETCONF access control enforcement. If 'true', then enforcement is enabled. If 'false', then enforcement is disabled
**type**\: bool
.. attribute:: exec_default
Controls whether exec access is granted if no appropriate rule is found for a particular protocol operation request
**type**\: :py:class:`ActionTypeEnum <ydk.models.ietf.ietf_netconf_acm.ActionTypeEnum>`
.. attribute:: groups
NETCONF Access Control Groups
**type**\: :py:class:`Groups <ydk.models.ietf.ietf_netconf_acm.Nacm.Groups>`
.. attribute:: read_default
Controls whether read access is granted if no appropriate rule is found for a particular read request
**type**\: :py:class:`ActionTypeEnum <ydk.models.ietf.ietf_netconf_acm.ActionTypeEnum>`
.. attribute:: rule_list
An ordered collection of access control rules
**type**\: list of :py:class:`RuleList <ydk.models.ietf.ietf_netconf_acm.Nacm.RuleList>`
.. attribute:: write_default
Controls whether create, update, or delete access is granted if no appropriate rule is found for a particular write request
**type**\: :py:class:`ActionTypeEnum <ydk.models.ietf.ietf_netconf_acm.ActionTypeEnum>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
self.denied_data_writes = None
self.denied_notifications = None
self.denied_operations = None
self.enable_external_groups = None
self.enable_nacm = None
self.exec_default = None
self.groups = Nacm.Groups()
self.groups.parent = self
self.read_default = None
self.rule_list = YList()
self.rule_list.parent = self
self.rule_list.name = 'rule_list'
self.write_default = None
class Groups(object):
"""
NETCONF Access Control Groups.
.. attribute:: group
One NACM Group Entry. This list will only contain configured entries, not any entries learned from any transport protocols
**type**\: list of :py:class:`Group <ydk.models.ietf.ietf_netconf_acm.Nacm.Groups.Group>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
self.parent = None
self.group = YList()
self.group.parent = self
self.group.name = 'group'
class Group(object):
"""
One NACM Group Entry. This list will only contain
configured entries, not any entries learned from
any transport protocols.
.. attribute:: name <key>
Group name associated with this entry
**type**\: str
**pattern:** [^\\\*].\*
.. attribute:: user_name
Each entry identifies the username of a member of the group associated with this entry
**type**\: list of str
**range:** 1..18446744073709551615
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
self.parent = None
self.name = None
self.user_name = YLeafList()
self.user_name.parent = self
self.user_name.name = 'user_name'
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/ietf-netconf-acm:nacm/ietf-netconf-acm:groups/ietf-netconf-acm:group[ietf-netconf-acm:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.user_name is not None:
for child in self.user_name:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['Nacm.Groups.Group']['meta_info']
@property
def _common_path(self):
return '/ietf-netconf-acm:nacm/ietf-netconf-acm:groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.group is not None:
for child_ref in self.group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['Nacm.Groups']['meta_info']
class RuleList(object):
"""
An ordered collection of access control rules.
.. attribute:: name <key>
Arbitrary name assigned to the rule\-list
**type**\: str
**range:** 1..18446744073709551615
.. attribute:: group
List of administrative groups that will be assigned the associated access rights defined by the 'rule' list. The string '\*' indicates that all groups apply to the entry
**type**\: one of the below types:
**type**\: list of str
**pattern:** \\\*
----
**type**\: list of str
**pattern:** [^\\\*].\*
----
.. attribute:: rule
One access control rule. Rules are processed in user\-defined order until a match is found. A rule matches if 'module\-name', 'rule\-type', and 'access\-operations' match the request. If a rule matches, the 'action' leaf determines if access is granted or not
**type**\: list of :py:class:`Rule <ydk.models.ietf.ietf_netconf_acm.Nacm.RuleList.Rule>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
self.parent = None
self.name = None
self.group = YLeafList()
self.group.parent = self
self.group.name = 'group'
self.rule = YList()
self.rule.parent = self
self.rule.name = 'rule'
class Rule(object):
"""
One access control rule.
Rules are processed in user\-defined order until a match is
found. A rule matches if 'module\-name', 'rule\-type', and
'access\-operations' match the request. If a rule
matches, the 'action' leaf determines if access is granted
or not.
.. attribute:: name <key>
Arbitrary name assigned to the rule
**type**\: str
**range:** 1..18446744073709551615
.. attribute:: access_operations
Access operations associated with this rule. This leaf matches if it has the value '\*' or if the bit corresponding to the requested operation is set
**type**\: one of the below types:
**type**\: str
**pattern:** \\\*
----
**type**\: :py:class:`AccessOperationsType_Bits <ydk.models.ietf.ietf_netconf_acm.AccessOperationsType_Bits>`
----
.. attribute:: action
The access control action associated with the rule. If a rule is determined to match a particular request, then this object is used to determine whether to permit or deny the request
**type**\: :py:class:`ActionTypeEnum <ydk.models.ietf.ietf_netconf_acm.ActionTypeEnum>`
**mandatory**\: True
.. attribute:: comment
A textual description of the access rule
**type**\: str
.. attribute:: module_name
Name of the module associated with this rule. This leaf matches if it has the value '\*' or if the object being accessed is defined in the module with the specified module name
**type**\: one of the below types:
**type**\: str
**pattern:** \\\*
----
**type**\: str
----
.. attribute:: notification_name
This leaf matches if it has the value '\*' or if its value equals the requested notification name
**type**\: one of the below types:
**type**\: str
**pattern:** \\\*
----
**type**\: str
----
.. attribute:: path
Data Node Instance Identifier associated with the data node controlled by this rule. Configuration data or state data instance identifiers start with a top\-level data node. A complete instance identifier is required for this type of path value. The special value '/' refers to all possible datastore contents
**type**\: str
**mandatory**\: True
.. attribute:: rpc_name
This leaf matches if it has the value '\*' or if its value equals the requested protocol operation name
**type**\: one of the below types:
**type**\: str
**pattern:** \\\*
----
**type**\: str
----
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
self.parent = None
self.name = None
self.access_operations = None
self.action = None
self.comment = None
self.module_name = None
self.notification_name = None
self.path = None
self.rpc_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.name is None:
raise YPYModelError('Key property name is None')
return self.parent._common_path +'/ietf-netconf-acm:rule[ietf-netconf-acm:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.access_operations is not None:
return True
if self.action is not None:
return True
if self.comment is not None:
return True
if self.module_name is not None:
return True
if self.notification_name is not None:
return True
if self.path is not None:
return True
if self.rpc_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['Nacm.RuleList.Rule']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/ietf-netconf-acm:nacm/ietf-netconf-acm:rule-list[ietf-netconf-acm:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.group is not None:
for child in self.group:
if child is not None:
return True
if self.rule is not None:
for child_ref in self.rule:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['Nacm.RuleList']['meta_info']
@property
def _common_path(self):
return '/ietf-netconf-acm:nacm'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.denied_data_writes is not None:
return True
if self.denied_notifications is not None:
return True
if self.denied_operations is not None:
return True
if self.enable_external_groups is not None:
return True
if self.enable_nacm is not None:
return True
if self.exec_default is not None:
return True
if self.groups is not None and self.groups._has_data():
return True
if self.read_default is not None:
return True
if self.rule_list is not None:
for child_ref in self.rule_list:
if child_ref._has_data():
return True
if self.write_default is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.ietf._meta import _ietf_netconf_acm as meta
return meta._meta_table['Nacm']['meta_info']
|
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import itertools
from enum import IntEnum
import datetime
import decimal
import random
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
class AddIt(object):
def __init__(self, sp):
self.sp = sp
def __enter__(self):
self.spv = self.sp()
def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self, t):
return str(t) + '-' + self.spv
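# Note on AddIt: streamsx calls __enter__/__exit__ on callables that define
# them when the processing element starts, so the submission parameter is only
# resolved (self.sp()) inside __enter__, i.e. at runtime in the worker, not at
# topology declaration time where it would still evaluate to None.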
class TestSubmissionParams(unittest.TestCase):
""" Test submission params (standalone).
"""
_multiprocess_can_split_ = True
def setUp(self):
Tester.setup_standalone(self)
def test_spl_default(self):
"""
Test passing a submission parameter with a default value into an SPL operator parameter.
"""
N=27
G='hey there'
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spGreet = topo.create_submission_parameter('greeting', default=G)
self.assertIsNone(spGreet())
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
b.s = b.output(spGreet)
tester = Tester(topo)
tester.tuple_count(b.stream, N)
tester.contents(b.stream, [{'seq':i, 's':G} for i in range(N)])
tester.test(self.test_ctxtype, self.test_config)
def test_topo(self):
topo = Topology()
s = topo.source(range(38))
lower = topo.create_submission_parameter('lower')
upper = topo.create_submission_parameter('upper')
addin = topo.create_submission_parameter('addin')
s = s.filter(lambda v: v < int(lower()) or v > int(upper()))
m = s.filter(lambda v : v < 3)
m = m.map(AddIt(addin))
jc = JobConfig()
jc.submission_parameters['lower'] = 7
jc.submission_parameters['upper'] = 33
jc.submission_parameters['addin'] = 'Yeah!'
jc.add(self.test_config)
tester = Tester(topo)
tester.contents(s, [0,1,2,3,4,5,6,34,35,36,37])
tester.contents(m, ['0-Yeah!','1-Yeah!','2-Yeah!'])
tester.test(self.test_ctxtype, self.test_config)
def test_topo_with_def_and_type(self):
topo = Topology()
s = topo.source(range(38))
lower = topo.create_submission_parameter('lower', default=0)
upper = topo.create_submission_parameter('upper', default=30)
s = s.filter(lambda v: v < lower() or v > upper())
jc = JobConfig()
jc.submission_parameters['lower'] = 5
jc.add(self.test_config)
tester = Tester(topo)
tester.contents(s, [0,1,2,3,4,31,32,33,34,35,36,37])
tester.test(self.test_ctxtype, self.test_config)
def test_topo_types_from_default(self):
topo = Topology()
sp_str = topo.create_submission_parameter('sp_str', default='Hi')
sp_int = topo.create_submission_parameter('sp_int', default=89)
sp_float = topo.create_submission_parameter('sp_float', default=0.5)
sp_bool = topo.create_submission_parameter('sp_bool', default=False)
s = topo.source(range(17))
s = s.filter(lambda v : isinstance(sp_str(), str) and sp_str() == 'Hi')
s = s.filter(lambda v : isinstance(sp_int(), int) and sp_int() == 89)
s = s.filter(lambda v : isinstance(sp_float(), float) and sp_float() == 0.5)
s = s.filter(lambda v : isinstance(sp_bool(), bool) and sp_bool() is False)
tester = Tester(topo)
tester.tuple_count(s, 17)
tester.test(self.test_ctxtype, self.test_config)
def test_topo_types_explicit_set(self):
topo = Topology()
sp_str = topo.create_submission_parameter('sp_str', type_=str)
sp_int = topo.create_submission_parameter('sp_int', type_=int)
sp_float = topo.create_submission_parameter('sp_float', type_=float)
sp_bool = topo.create_submission_parameter('sp_bool', type_=bool)
s = topo.source(range(17))
s = s.filter(lambda v : isinstance(sp_str(), str) and sp_str() == 'SeeYa')
s = s.filter(lambda v : isinstance(sp_int(), int) and sp_int() == 10)
s = s.filter(lambda v : isinstance(sp_float(), float) and sp_float() == -0.5)
s = s.filter(lambda v : isinstance(sp_bool(), bool) and sp_bool() is True)
jc = JobConfig()
jc.submission_parameters['sp_str'] = 'SeeYa'
jc.submission_parameters['sp_int'] = 10
jc.submission_parameters['sp_float'] = -0.5
jc.submission_parameters['sp_bool'] = True
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s, 17)
tester.test(self.test_ctxtype, self.test_config)
def test_parallel(self):
topo = Topology()
sp_w1 = topo.create_submission_parameter('w1', type_=int)
sp_w2 = topo.create_submission_parameter('w2', type_=int)
s = topo.source(range(67)).set_parallel(sp_w1)
s = s.filter(lambda v : v % sp_w1() == 0)
s = s.end_parallel()
s = s.parallel(width=sp_w2)
s = s.filter(lambda v : v % sp_w2() == 0)
s = s.end_parallel()
jc = JobConfig()
jc.submission_parameters['w1'] = 3
jc.submission_parameters['w2'] = 5
jc.add(self.test_config)
tester = Tester(topo)
tester.contents(s,[0,15,30,45,60]*3, ordered=False)
tester.test(self.test_ctxtype, self.test_config)
class TestDistributedSubmissionParams(TestSubmissionParams):
""" Test submission params (distributed).
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing submission parameters as SPL operator parameters.
"""
N=22
G='hey'
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
spGreet = topo.create_submission_parameter('greeting')
self.assertIsNone(spTopic())
self.assertIsNone(spGreet())
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
b.s = b.output(spGreet)
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.submission_parameters['greeting'] = G
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
#tester.run_for(300)
tester.contents(s.stream, [{'seq':i, 's':G} for i in range(N)])
tester.test(self.test_ctxtype, self.test_config)
class TestSasSubmissionParams(TestDistributedSubmissionParams):
""" Test submission params (service).
"""
def setUp(self):
Tester.setup_streaming_analytics(self, force_remote_build=True)
|
|
from read_spec import *
from read_spec import Inst
# Instrument parameters
inst = __name__[5:] # HARPS, HARPSpre, HARPSpost, HARPN
name = inst[:5] # HARPS, HARPN (passed to bary)
obsname = {'HARPS': 'eso', 'HARPN': 'lapalma'}[name] # for barycorrpy
iomax = {'HARPS': 72, 'HARPN': 69}[name]
oset = {'HARPS': '10:71', 'HARPN': '10:'}[name]
pat = "*.tar *e2ds_%(fib)s.fits *e2ds_%(fib)s.fits.gz *_S2D_%(fib)s.fits" # space separated suffices
#maskfile = 'telluric_mask_carm_short.dat'
def scan(self, s, pfits=True, verb=False):
"""
SYNTAX: read_harps(filename)
OUTPUT: namedtuple('spectrum', 'w f berv bjd blaze drift timeid sn55 ')
w - wavelength
f - flux
berv - Barycentric Earth Radial Velocity
bjd - Barycentric Julian Day
blaze - Blaze filename
drift - Used RV Drift
sn55 - S/N at the centre of order 55
"""
drs = self.drs
if '.tar' in s:
s = file_from_tar(s, inst=inst, fib=self.fib, pfits=pfits)
if isinstance(s, str) and '.gz' in s:
# if s is isinstance(s,tarfile.ExFileObject) then s.position will change !? resulting in:
# *** IOError: Empty or corrupt FITS file
pfits = True
if 1:
HIERARCH = 'HIERARCH '
HIERINST = HIERARCH + {'HARPS': 'ESO ', 'HARPN': 'TNG '}[inst[0:5]]
k_tmmean = {'HARPS': HIERINST + 'INS DET1 TMMEAN', 'HARPN': HIERINST + 'EXP_METER_A EXP CENTROID'}[inst[0:5]]
# In old HARPN the keyword is different and the value absolute
#k_tmmean = {'HARPS': HIERINST + 'INS DET1 TMMEAN', 'HARPN': HIERINST + 'EXP1 TMMEAN'}[inst]
if drs:
self.HIERDRS = HIERDRS = HIERINST + 'DRS '
self.HIERQC = HIERQC = HIERINST + 'QC '
k_sn55 = HIERDRS + 'SPE EXT SN55'
k_berv = HIERDRS + 'BERV'
k_bjd = HIERDRS + 'BJD'
else:
k_sn55 = HIERARCH + 'FOX SNR 55'
k_berv = 'E_BERV'
k_bjd = 'E_BJD'
if pfits is True: # header with pyfits
self.hdulist = hdulist = pyfits.open(s) # slow 30 ms
hdr = self.hdulist[0].header # pyfits.getheader(s)
elif pfits==2: # a faster version
args = ('INSTRUME', 'OBJECT', 'MJD-OBS', 'DATE-OBS', 'OBS TARG NAME', 'EXPTIME',
'MJD-OBS', 'FILENAME', 'RA', 'DEC', k_tmmean, HIERINST+'DPR TYPE',
HIERINST+'DPR TECH', HIERINST+'INS MODE', HIERINST+'OBS TARG NAME')
args += (k_bjd, k_berv, k_sn55)
# args += (HIERINST+'OBS PI-COI NAME', HIERINST+'OBS PROG ID')
if drs:
args += (HIERDRS+'BLAZE FILE', HIERDRS+'DRIFT RV USED',
HIERDRS+'CAL TH DEG LL', HIERDRS+'CAL LOC NBO',
HIERDRS+'CAL TH COEFF LL')
hdr = imhead(s, *args)
self.hdu = getext(s)
else:
#hdr = fitsio.read_header(s) no faster?
self.f, hdrio = fitsio.read(s, header=True)
hdr = dict((key, val.strip() if type(val) is str else val) for key,val in dict(hdrio).iteritems())
HIERARCH = ''
#self.drs = 'DRS CAL LOC NBO' in "".join(hdr.keys()) # check DRS or FOX
self.instname = hdr['INSTRUME'] #if self.drs else 'HARPS'
if self.instname not in ('HARPS', 'HARPN'):
pause('\nWARNING: inst should be HARPS or HARPN, but got: '+self.instname+'\nSee option -inst for available inst.')
self.HIERARCH = HIERARCH
self.airmass = hdr.get('AIRMASS', np.nan)
self.exptime = hdr['EXPTIME']
self.mjd = hdr['MJD-OBS']
self.dateobs = hdr['DATE-OBS']
self.ra = hdr['RA']
self.de = hdr['DEC']
self.utc = datetime.datetime.strptime(self.dateobs, '%Y-%m-%dT%H:%M:%S.%f')
self.obs.lon = -70.7345
self.obs.lat = -29.2584
if k_tmmean not in hdr:
warnings.warn('Warning: old HARPN data? Setting tmmean to 0.5!')
self.tmmean = hdr.get(k_tmmean, 0.5)
self.DRS = hdr.get('HIERARCH ESO PRO REC1 PIPE ID') # 'espdr/2.3.5', flag for the new ESPRESSO pipeline. Should be merged with the drs flag to a version string.
self.drsbjd = hdr.get(HIERQC + 'BJD' if self.DRS else k_bjd, hdr.get('MJD-OBS')) # e.g. 'HIERARCH TNG QC BJD'
self.drsberv = hdr.get(HIERQC + 'BERV' if self.DRS else k_berv, np.nan)
self.sn55 = hdr.get(k_sn55, hdr.get(HIERQC+'ORDER55 SNR', np.nan))
self.blaze = hdr.get(HIERDRS+'BLAZE FILE', 0)
self.drift = hdr.get(HIERDRS+'DRIFT RV USED', np.nan)
if abs(self.drift) > 1000:
# sometimes there are crazy drift values ~2147491.59911, e.g. 2011-06-15T08:11:13.465
self.drift = np.nan
if self.instname == 'HARPS':
# read the comment
#if pfits==2: fileid = hdr['DATE-OBS']; self.timeid=fileid #ok!? not always
if pfits:
if hasattr(hdr, 'comments'): # pfits==2 or pyfits.__version__>'2.' https://github.com/astropy/pyregion/issues/20
fileid = hdr.comments['MJD-OBS']
else:
fileid = str(hdr.ascardlist()['MJD-OBS'])
else: fileid = hdrio.get_comment('MJD-OBS')
self.timeid = fileid[fileid.index('(')+1 : fileid.index(')')]
elif self.instname == 'HARPN':
self.timeid = fileid = hdr['FILENAME'][6:29]
hdr['OBJECT'] = hdr[HIERINST+'OBS TARG NAME'] # HARPN has no OBJECT keyword
#calmode = hdr.get('IMAGETYP',0).split(",")[:2]
calmode = hdr.get(HIERINST+'DPR TYPE','NOTFOUND').split(',')[:2]
self.calmode = ','.join(calmode)
calmodedict = {'STAR,WAVE': 'OBJ,CAL', 'STAR,DARK': 'OBJ,SKY', 'STAR,SKY': 'OBJ,SKY'}
if self.calmode in calmodedict:
self.calmode = calmodedict[self.calmode]
if self.calmode.startswith('WAVE'):
self.flag |= sflag.nosci
if hdr[HIERINST+'DPR TECH'] == 'ECHELLE,ABSORPTION-CELL':
self.flag |= sflag.iod
if hdr[HIERINST+'INS MODE'] == 'EGGS':
self.flag |= sflag.config
# On May 29th 2015 (BJD = 2457171.9481) the HARPS fibre upgrade intervention took place
if inst == 'HARPSpre' and self.timeid > '2015-05-29T':
self.flag |= sflag.config
if inst == 'HARPSpost' and self.timeid <= '2015-05-29T':
self.flag |= sflag.config
hdr['OBJECT'] = hdr.get('OBJECT', 'FOX')
self.header = self.hdr = hdr # self.header will be set to None
def data(self, orders, pfits=True):
hdr = self.hdr
drs = self.drs
if 1: # read order data
if hasattr(self, 'hdu'): # read directly
f = self.hdu.getdata(o=orders)
if not drs:
e = self.hdu.getdata('SIG', o=orders)
w = self.hdu.getdata('WAVE', o=orders)
else:
if not hasattr(self, 'hdulist'):
scan(self, self.filename)
f = self.hdulist['SCIDATA' if self.DRS else 0 if drs else 'SPEC'].section[orders]
if not drs or self.DRS:
e = self.hdulist['ERRDATA' if self.DRS else 'SIG'].section[orders]
w = self.hdulist['WAVEDATA_VAC_BARY' if self.DRS else 'WAVE'].section[orders]
if self.DRS:
w = w / (1.0 + self.drsberv/299792.4580) # undo BERV correction!
if not drs:
f *= 100000
e *= 100000
# bp = self.hdulist['QUALDATA'].section[orders] QUALDATA not used yet, at least for HARPN.2015-08-17T11-03-36.560_S2D_A all are zero.
bpmap = np.isnan(f).astype(int) # flag 1 for nan
if not drs: bpmap[e==0] |= flag.nan
if drs and not self.DRS:
# print " applying wavelength solution ", file
# omax = self.hdu['SPEC'].NAXIS1
omax = hdr.get(self.HIERDRS+'CAL LOC NBO', iomax) # 72 for A and 71 for B
# missing for HARPN.2018-11-08T18-10-16.439_e2ds_A.fits, also "DRS CAL TH ORDER NBR"
# HIERARCH IA2 DRS VERSION = 'HARPN_3.7.1_140214' / IA2 DRS version
d = hdr[self.HIERDRS+'CAL TH DEG LL']
xmax = 4096
x = np.empty((d+1, xmax), 'int64')
x[0].fill(1) # x[0,*] = x^0 = 1,1,1,1,1,...
x[1] = np.arange(xmax) # = x^1 = 0,1,2,3,4,...
for i in range(1,d): x[i+1] = x[i] * x[1] # = x^i
if not hasattr(self, 'A'):
#A = np.array([hdr[self.HIERDRS+'CAL TH COEFF LL'+str(i)] for i in range(omax*(d+1))],dtype='float64').reshape(omax,d+1) #slow 30 ms
self.A = np.reshape([hdr[self.HIERDRS+'CAL TH COEFF LL'+str(i)] for i in range(omax*(d+1))], (omax,d+1)) #slow 30 ms
w = np.dot(self.A[orders], x) # wavelength lambda
w = airtovac(w)
e = np.sqrt(np.where(bpmap, 0., 5**2 * 6 + np.abs(f, dtype=float)))
with np.errstate(invalid='ignore'):
bpmap[f < -3*e] |= flag.neg # flag 2 for zero and negative flux
bpmap[f > 300000] |= flag.sat # estimate for saturation level:
# HARPS.2004-10-03T01:30:44.506.fits:
# last order: e2ds_B: 346930 (x=2158) raw: 62263 (y=1939)
return w, f, e, bpmap
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha: float >= 0. Negative slope coefficient.
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('keras.layers.PReLU')
class PReLU(Layer):
"""Parametric Rectified Linear Unit.
It follows:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`,
where `alpha` is a learned array with the same shape as x.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha_initializer: initializer function for the weights.
alpha_regularizer: regularizer for the weights.
alpha_constraint: constraint for the weights.
shared_axes: the axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
"""
def __init__(self,
alpha_initializer='zeros',
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs):
super(PReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
@tf_utils.shape_type_conversion
def build(self, input_shape):
param_shape = list(input_shape[1:])
self.param_broadcast = [False] * len(param_shape)
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.param_broadcast[i - 1] = True
self.alpha = self.add_weight(
shape=param_shape,
name='alpha',
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs, mask=None):
pos = K.relu(inputs)
if K.backend() == 'theano':
neg = (
K.pattern_broadcast(self.alpha, self.param_broadcast) *
(inputs - math_ops.abs(inputs)) * 0.5)
else:
neg = -self.alpha * K.relu(-inputs)
return pos + neg
def get_config(self):
config = {
'alpha_initializer': initializers.serialize(self.alpha_initializer),
'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
'alpha_constraint': constraints.serialize(self.alpha_constraint),
'shared_axes': self.shared_axes
}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('keras.layers.ELU')
class ELU(Layer):
"""Exponential Linear Unit.
It follows:
`f(x) = alpha * (exp(x) - 1.) for x < 0`,
`f(x) = x for x >= 0`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha: scale for the negative factor.
"""
def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.elu(inputs, self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(ELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
"""Thresholded Rectified Linear Unit.
It follows:
`f(x) = x for x > theta`,
`f(x) = 0 otherwise`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
theta: float >= 0. Threshold location of activation.
"""
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedReLU, self).__init__(**kwargs)
self.supports_masking = True
self.theta = K.cast_to_floatx(theta)
def call(self, inputs, mask=None):
return inputs * math_ops.cast(
math_ops.greater(inputs, self.theta), K.floatx())
def get_config(self):
config = {'theta': float(self.theta)}
base_config = super(ThresholdedReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('keras.layers.Softmax')
class Softmax(Layer):
"""Softmax activation function.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
axis: Integer, axis along which the softmax normalization is applied.
"""
def __init__(self, axis=-1, **kwargs):
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs):
return K.softmax(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(Softmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('keras.layers.ReLU')
class ReLU(Layer):
"""Rectified Linear Unit activation function.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = negative_slope * (x - threshold)` otherwise.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
max_value: float >= 0. Maximum activation value.
negative_slope: float >= 0. Negative slope coefficient.
threshold: float. Threshold value for thresholded activation.
"""
def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
super(ReLU, self).__init__(**kwargs)
if max_value is not None and max_value < 0.:
raise ValueError('max_value of Relu layer '
'cannot be negative value: ' + str(max_value))
if negative_slope < 0.:
raise ValueError('negative_slope of Relu layer '
'cannot be negative value: ' + str(negative_slope))
self.supports_masking = True
if max_value is not None:
max_value = K.cast_to_floatx(max_value)
self.max_value = max_value
self.negative_slope = K.cast_to_floatx(negative_slope)
self.threshold = K.cast_to_floatx(threshold)
def call(self, inputs):
# K.relu calls its leaky-slope argument `alpha`, so negative_slope is
# passed through under that name.
return K.relu(inputs,
alpha=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold)
def get_config(self):
config = {
'max_value': self.max_value,
'negative_slope': self.negative_slope,
'threshold': self.threshold
}
base_config = super(ReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
|
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helpers related to multiprocessing."""
import builtins
import itertools
import logging
import multiprocessing
import multiprocessing.dummy
import os
import sys
import threading
import traceback
from multiprocessing import process
DISABLE_ASYNC = os.environ.get('SUPERSIZE_DISABLE_ASYNC') == '1'
if DISABLE_ASYNC:
logging.warning('Running in synchronous mode.')
_is_child_process = False
_silence_exceptions = False
# Used to pass parameters to forked processes without pickling.
_fork_params = None
_fork_kwargs = None
# Avoid printing backtrace for every worker for Ctrl-C.
def _PatchMultiprocessing():
old_run = process.BaseProcess.run
def new_run(self):
try:
return old_run(self)
except (BrokenPipeError, KeyboardInterrupt):
sys.exit(1)
process.BaseProcess.run = new_run
_PatchMultiprocessing()
class _ImmediateResult:
def __init__(self, value):
self._value = value
def get(self):
return self._value
def wait(self):
pass
def ready(self):
return True
def successful(self):
return True
class _ExceptionWrapper:
"""Used to marshal exception messages back to main process."""
def __init__(self, msg, exception_type=None):
self.msg = msg
self.exception_type = exception_type
def MaybeThrow(self):
if self.exception_type:
raise getattr(builtins,
self.exception_type)('Originally caused by: ' + self.msg)
class _FuncWrapper:
"""Runs on the fork()'ed side to catch exceptions and spread *args."""
def __init__(self, func):
global _is_child_process
_is_child_process = True
self._func = func
def __call__(self, index, _=None):
try:
return self._func(*_fork_params[index], **dict(_fork_kwargs))
except BaseException as e:
# Only keep the exception type for builtin exception types or else risk
# further marshalling exceptions.
exception_type = None
if type(e).__name__ in dir(builtins):
exception_type = type(e).__name__
# multiprocessing is supposed to catch and return exceptions automatically
# but it doesn't seem to work properly :(.
return _ExceptionWrapper(traceback.format_exc(), exception_type)
class _WrappedResult:
"""Allows for host-side logic to be run after child process has terminated.
* Raises exception caught by _FuncWrapper.
* Allows for custom unmarshalling of return value.
"""
def __init__(self, result, decode_func=None):
self._result = result
self._decode_func = decode_func
def get(self):
self.wait()
value = self._result.get()
_CheckForException(value)
if not self._decode_func or not self._result.successful():
return value
return self._decode_func(value)
def wait(self):
self._result.wait()
def ready(self):
return self._result.ready()
def successful(self):
return self._result.successful()
def _CheckForException(value):
if isinstance(value, _ExceptionWrapper):
global _silence_exceptions
if not _silence_exceptions:
value.MaybeThrow()
_silence_exceptions = True
logging.error('Subprocess raised an exception:\n%s', value.msg)
sys.exit(1)
def _MakeProcessPool(job_params, **job_kwargs):
global _fork_params
global _fork_kwargs
assert _fork_params is None
assert _fork_kwargs is None
pool_size = min(len(job_params), multiprocessing.cpu_count())
_fork_params = job_params
_fork_kwargs = job_kwargs
ret = multiprocessing.Pool(pool_size)
_fork_params = None
_fork_kwargs = None
return ret
def ForkAndCall(func, args, decode_func=None):
"""Runs |func| in a fork'ed process.
Returns:
A Result object (call .get() to get the return value)
"""
if DISABLE_ASYNC:
result = _ImmediateResult(func(*args))
else:
pool = _MakeProcessPool([args]) # Omit |kwargs|.
result = pool.apply_async(_FuncWrapper(func), (0, ))
pool.close()
return _WrappedResult(result, decode_func=decode_func)
def BulkForkAndCall(func, arg_tuples, **kwargs):
"""Calls |func| in a fork'ed process for each set of args within |arg_tuples|.
Args:
kwargs: Common keyword arguments to be passed to |func|.
Yields the return values as they come in.
"""
arg_tuples = list(arg_tuples)
if not arg_tuples:
return
if DISABLE_ASYNC:
for args in arg_tuples:
yield func(*args, **kwargs)
return
pool = _MakeProcessPool(arg_tuples, **kwargs)
wrapped_func = _FuncWrapper(func)
try:
for result in pool.imap_unordered(wrapped_func, range(len(arg_tuples))):
_CheckForException(result)
yield result
finally:
pool.close()
pool.join()
def CallOnThread(func, *args, **kwargs):
"""Calls |func| on a new thread and returns a promise for its return value."""
if DISABLE_ASYNC:
return _ImmediateResult(func(*args, **kwargs))
pool = multiprocessing.dummy.Pool(1)
result = pool.apply_async(func, args=args, kwds=kwargs)
pool.close()
return result
def EncodeDictOfLists(d, key_transform=None, value_transform=None):
"""Serializes a dict where values are lists of strings.
Does not support '' as keys, nor [''] as values.
"""
assert '' not in d
assert [''] not in iter(d.values())
keys = iter(d)
if key_transform:
keys = (key_transform(k) for k in keys)
keys = '\x01'.join(keys)
if value_transform:
values = '\x01'.join(
'\x02'.join(value_transform(y) for y in x) for x in d.values())
else:
values = '\x01'.join('\x02'.join(x) for x in d.values())
return keys, values
def JoinEncodedDictOfLists(encoded_values):
assert isinstance(encoded_values, list), 'Does not work with generators'
return ('\x01'.join(x[0] for x in encoded_values if x[0]),
'\x01'.join(x[1] for x in encoded_values if x[1]))
def DecodeDictOfLists(encoded_keys_and_values,
key_transform=None,
value_transform=None):
"""Deserializes a dict where values are lists of strings."""
encoded_keys, encoded_values = encoded_keys_and_values
if not encoded_keys:
return {}
keys = encoded_keys.split('\x01')
if key_transform:
keys = (key_transform(k) for k in keys)
encoded_lists = encoded_values.split('\x01')
ret = {}
for key, encoded_list in zip(keys, encoded_lists):
if not encoded_list:
values = []
else:
values = encoded_list.split('\x02')
if value_transform:
for i in range(len(values)):
values[i] = value_transform(values[i])
ret[key] = values
return ret
EMPTY_ENCODED_DICT = EncodeDictOfLists({})
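# Minimal round-trip sketch for the encode/decode helpers above; kept under a
# __main__ guard so importing this module is unaffected. Names prefixed with
# an underscore are local to this example only.
if __name__ == '__main__':
  _sample = {'libfoo.so': ['a.o', 'b.o'], 'libbar.so': ['c.o']}
  _encoded = EncodeDictOfLists(_sample)
  assert DecodeDictOfLists(_encoded) == _sample
  print('EncodeDictOfLists/DecodeDictOfLists round-trip OK')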
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client import vfs
from grr.client.client_actions import admin
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
# pylint: disable=unused-import
from grr.lib.flows.general import discovery
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class DiscoveryTestEventListener(flow.EventListener):
"""A test listener to receive new client discoveries."""
well_known_session_id = rdfvalue.SessionID(flow_name="discovery_test")
EVENTS = ["Discovery"]
# For this test we just write the event as a class attribute.
event = None
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = message
DiscoveryTestEventListener.event = event
class TestClientInterrogate(artifact_test.ArtifactTest):
"""Test the interrogate flow."""
def _CheckUsers(self, all_users):
"""Check all user stores."""
summary = self.fd.GetSummary()
self.assertItemsEqual([x.username for x in summary.users], all_users)
users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]
self.assertItemsEqual(users, all_users)
self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)
# Check kb users
kbusers = [x.username for x in
self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]
self.assertItemsEqual(kbusers, all_users)
def _CheckAFF4Object(self, hostname, system, install_date):
self.assertEqual(self.fd.Get(self.fd.Schema.HOSTNAME), hostname)
self.assertEqual(self.fd.Get(self.fd.Schema.SYSTEM), system)
self.assertEqual(self.fd.Get(self.fd.Schema.INSTALL_DATE), install_date)
def _CheckClientInfo(self):
info = self.fd.Get(self.fd.Schema.CLIENT_INFO)
self.assertEqual(info.client_name, config_lib.CONFIG["Client.name"])
self.assertEqual(info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(info.build_time, config_lib.CONFIG["Client.build_time"])
def _CheckGRRConfig(self):
"""Check old and new client config."""
config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_info["Client.control_urls"],
["http://localhost:8001/control"])
self.assertEqual(config_info["Client.poll_min"], 1.0)
def _CheckClientIndex(self, host_pattern):
"""Check that the index has been updated."""
index_fd = aff4.FACTORY.Create(self.fd.Schema.client_index, "AFF4Index",
mode="r", token=self.token)
self.assertEqual(
[self.fd.urn],
[x for x in index_fd.Query([self.fd.Schema.HOSTNAME], host_pattern)])
def _CheckClientKwIndex(self, keywords, expected_count):
# Tests that the client index has expected_count results when
# searched for keywords.
index = aff4.FACTORY.Create(client_index.MAIN_INDEX,
aff4_type="ClientIndex",
mode="rw",
token=self.token)
self.assertEqual(len(index.LookupClients(keywords)),
expected_count)
def _CheckNotificationsCreated(self):
user_fd = aff4.FACTORY.Open("aff4:/users/test", token=self.token)
notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))
def _CheckClientSummary(self, osname, version, kernel="3.13.0-39-generic",
release="5"):
summary = self.fd.GetSummary()
self.assertEqual(summary.client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(summary.client_info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(summary.client_info.build_time,
config_lib.CONFIG["Client.build_time"])
self.assertEqual(summary.system_info.system, osname)
self.assertEqual(summary.system_info.node, "test_node")
self.assertEqual(summary.system_info.release, release)
self.assertEqual(summary.system_info.version, version)
self.assertEqual(summary.system_info.machine, "i386")
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(len(summary.interfaces), 1)
self.assertEqual(summary.interfaces[0].mac_address, "123456")
# Check that the client summary was published to the event listener.
self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)
self.assertEqual(
DiscoveryTestEventListener.event.interfaces[0].mac_address,
"123456")
def _CheckNetworkInfo(self):
net_fd = self.fd.OpenMember("network")
interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))
self.assertEqual(interfaces[0].mac_address, "123456")
self.assertEqual(interfaces[0].addresses[0].human_readable, "100.100.100.1")
self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),
"100.100.100.1")
# Mac addresses should be available as hex for searching
mac_addresses = self.fd.Get(self.fd.Schema.MAC_ADDRESS)
self.assertTrue("123456".encode("hex") in str(mac_addresses))
# Same for IP addresses.
ip_addresses = self.fd.Get(self.fd.Schema.HOST_IPS)
self.assertTrue("100.100.100.1" in str(ip_addresses))
def _CheckVFS(self):
# Check that virtual directories exist for the mount points
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os/mnt/data"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("fs/tsk/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("devices/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
def _CheckLabelIndex(self):
"""Check that label indexes are updated."""
index = aff4.FACTORY.Create(
client_index.MAIN_INDEX, aff4_type="ClientIndex",
mode="rw", token=self.token)
self.assertEqual(
list(index.LookupClients(["label:Label2"])),
[self.client_id])
def _CheckWindowsDiskInfo(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdf_client.Volume))
self.assertTrue(result.windowsvolume.drive_letter in ["Z:", "C:"])
def _CheckRegistryPathspec(self):
# This tests that we can click refresh on a key in the registry vfs subtree
# even if we haven't downloaded any other key above it in the tree.
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(
"HKEY_LOCAL_MACHINE").Add("random/path/bla"), token=self.token)
pathspec = fd.real_pathspec
self.assertEqual(pathspec.pathtype, rdf_paths.PathSpec.PathType.REGISTRY)
self.assertEqual(pathspec.CollapsePath(),
u"/HKEY_LOCAL_MACHINE/random/path/bla")
def _CheckRelease(self, desired_release, desired_version):
# Test for correct Linux release override behaviour.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
release = str(client.Get(client.Schema.OS_RELEASE))
version = str(client.Get(client.Schema.OS_VERSION))
self.assertEqual(release, desired_release)
self.assertEqual(version, desired_version)
def _CheckClientLibraries(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
libs = client.Get(client.Schema.LIBRARY_VERSIONS)
self.assertTrue(libs is not None)
libs = libs.ToDict()
error_str = admin.GetLibraryVersions.error_str
# Strip off the exception itself.
error_str = error_str[:error_str.find("%s")]
for key in admin.GetLibraryVersions.library_map:
self.assertIn(key, libs)
self.assertFalse(libs[key].startswith(error_str))
def testInterrogateLinuxWithWtmp(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdf_paths.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", [r"^login$"])
self.SetLinuxClient()
client_mock = action_mocks.InterrogatedClient(
"TransferBuffer", "StatFile", "Find", "HashBuffer",
"ListDirectory", "FingerprintFile", "GetLibraryVersions")
client_mock.InitializeClient()
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Linux", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*test.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Linux", "14.4", release="Ubuntu",
kernel="3.13.0-39-generic")
self._CheckRelease("Ubuntu", "14.4")
# users 1,2,3 from wtmp
# users yagharek, isaac from netgroup
self._CheckUsers(["yagharek", "isaac", "user1", "user2", "user3"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckClientKwIndex(["Linux"], 1)
self._CheckClientKwIndex(["Label2"], 1)
self._CheckClientLibraries()
def testInterrogateWindows(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdf_paths.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdf_paths.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.InterrogatedClient(
"TransferBuffer", "StatFile", "Find", "HashBuffer",
"ListDirectory", "FingerprintFile", "GetLibraryVersions")
self.SetWindowsClient()
client_mock.InitializeClient(system="Windows", version="6.1.7600",
kernel="6.1.7601")
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Windows", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*Host.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Windows", "6.1.7600", kernel="6.1.7601")
# users Bert and Ernie added by the fixture should not be present (USERS
# overridden by kb)
# jim parsed from registry profile keys
self._CheckUsers(["jim", "kovacs"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckWindowsDiskInfo()
self._CheckRegistryPathspec()
self._CheckClientKwIndex(["Linux"], 0)
self._CheckClientKwIndex(["Windows"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
#!/usr/bin/env python2
import sys
import time
import argparse
import numpy
from math import ceil, floor  # used by triangle_armchair below
import matplotlib
matplotlib.use('Agg')
sys.path.append('/home/twesterh/Bachelor/project/plasmon-cpp/python-tipsi')
import tipsi
# Define some config parameters
# =============================
# lattice_constant = 0.24612E-9 # [m]
onsite_potential = 0.0  # [eV] on-site potential; used by the triangle builders below
# hopping_value = 2.8 # [eV]
def parse_options(argv):
parser = argparse.ArgumentParser('Generate system')
parser.add_argument( '--type'
, dest='sample_type'
, choices=[ 'triangle:zigzag', 'triangle:armchair'
, 'sierpinski:carpet'
, 'square'
, 'periodic'
]
, required=True )
parser.add_argument( '--lattice-constant'
, dest='lattice_constant'
, type=float
, required=True )
parser.add_argument( '--hopping-value'
, dest='hopping_value'
, type=float
, required=True )
parser.add_argument( '--width'
, type=int
, dest='width'
, required=False )
parser.add_argument( '--start'
, type=int
, dest='start_width'
, required=False )
parser.add_argument( '--depth'
, type=int
, dest='depth'
, required=False )
print argv
options = parser.parse_args(argv)
print options
take_needed = \
{ 'triangle:zigzag' : lambda x: (x.width,)
, 'triangle:armchair' : lambda x: (x.width,)
, 'periodic' : lambda x: (x.width,)
, 'sierpinski:carpet' : lambda x: (x.start_width, x.depth)
, 'square' : lambda x: (x.width,)
}
return (options.sample_type,) \
+ take_needed[options.sample_type](options) \
+ (options.lattice_constant, options.hopping_value)
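# Example invocation (illustrative; the script name and values are placeholders):
#   python2 generate_system.py --type sierpinski:carpet --start 3 --depth 2 \
#       --lattice-constant 0.24612e-9 --hopping-value 2.8
# which parse_options() turns into
#   ('sierpinski:carpet', 3, 2, 2.4612e-10, 2.8)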
def save_hamiltonian(sample, hamiltonian_filename):
print "[*] Saving H to file..."
H = sample.hamiltonian()
N, _ = H.shape
with open(hamiltonian_filename, 'w') as f:
for i in range(N):
for x in numpy.nditer(H[i]):
f.write('({0.real},{0.imag})\t'.format(x))
f.write('\n')
print "[+] Done."
def save_coordinates(sample, coordinates_filename):
print "[*] Saving (x,y,z)'s to file..."
with open(coordinates_filename, 'w') as f:
for pos in sample._r:
f.write('{}\t{}\t{}\n'.format(pos[0], pos[1], pos[2]))
print "[+] Done."
def sierpinski_carpet( start_width
, iteration
, lattice_constant
, hopping_value ):
W = start_width * 3**iteration
H = W
# First we choose a system size (width, height) in unit cells.
sample = tipsi.square_sheet( W, H
, pbc=False
, latconst=lattice_constant)
# Add fractal holes.
deletesites = []
for i in xrange(iteration):
scale = W / (3.**i)
def in_hole(tag):
x, y, _, _ = tag
x = x % scale
y = y % scale
return x >= scale / 3. and x < 2. * scale / 3. \
and y >= scale / 3. and y < 2. * scale / 3.
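# Worked example (illustrative): with start_width=1 and iteration=1, W=3; at
# i=0 the scale is 3.0, so in_hole() is True exactly when 1 <= x % 3 < 2 and
# 1 <= y % 3 < 2 -- the middle ninth of every 3x3 block, which is the hole
# removed at that recursion level.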
for tag in sample.sites:
if in_hole(tag):
deletesites.append(tag)
for tag in deletesites:
sample.delete(tag)
# Add hoppings and finalize
sample.finalize_sites()
sample.neighbor_hopping(-hopping_value)
sample.plot()
sample = sample.finalize()
# Save results
save_hamiltonian(sample, "Hamiltonian"
+ "." + str(start_width)
+ "." + str(iteration)
+ ".dat")
save_coordinates(sample, "Coordinates"
+ "." + str(start_width)
+ "." + str(iteration)
+ ".dat")
def square( width
, lattice_constant
, hopping_value ):
# First we choose a system size (width, height) in unit cells.
sample = tipsi.square_sheet( width, width
, pbc=False
, latconst=lattice_constant)
# Add hoppings and finalize
sample.finalize_sites()
sample.neighbor_hopping(-hopping_value)
sample.plot()
sample = sample.finalize()
# Save results
save_hamiltonian(sample, "Hamiltonian"
+ "." + str(width)
+ ".dat")
save_coordinates(sample, "Coordinates"
+ "." + str(width)
+ ".dat")
def triangle_zigzag( width
, lattice_constant
, hopping_value ):
sample = tipsi.sample(tipsi.honeycomb_2d_lattice(lattice_constant))
# add sites
for y in xrange(width+2):
for x in xrange(width+2-y):
if (y==0):
if ((x!=0) and (x!=width+1)):
sample.set((x,y,0,1),onsite_potential)
elif (y==width+1):
sample.set((x,y,0,0),onsite_potential)
else:
sample.set((x,y,0,0),onsite_potential)
sample.set((x,y,0,1),onsite_potential)
# Add hoppings and finalize
sample.finalize_sites()
sample.neighbor_hopping(-hopping_value)
sample = sample.finalize()
# Save results
save_hamiltonian(sample, "Hamiltonian"
+ "." + str(width)
+ ".dat")
save_coordinates(sample, "Coordinates"
+ "." + str(width)
+ ".dat")
def triangle_armchair( width
, lattice_constant
, hopping_value ):
sample = tipsi.sample(tipsi.honeycomb_2d_lattice(lattice_constant))
# Add sites
for y in xrange(-width,width+1):
x_min = 0
x_max = int(ceil(width*1.5))
if (width%2==0):
x_max = x_max-int(floor(abs(y)/2.))
elif (width%2==1):
x_max = x_max-int(ceil(abs(y)/2.))
if (y!=0):
x_min = abs(y)-1
if (y<0):
x_min = x_min-y
x_max = x_max-y
for x in xrange(x_min,x_max):
if ((y!=0) and x==x_min):
if (y<0):
sample.set((x,y,0,1),onsite_potential)
else:
sample.set((x,y,0,0),onsite_potential)
else:
sample.set((x,y,0,0),onsite_potential)
sample.set((x,y,0,1),onsite_potential)
# Add hoppings and finalize
sample.finalize_sites()
sample.neighbor_hopping(-hopping_value)
sample = sample.finalize()
# Save results
save_hamiltonian(sample, "Hamiltonian"
+ "." + str(width)
+ ".dat")
save_coordinates(sample, "Coordinates" +
+ "." + str(width)
+ ".dat")
def periodic( width
, lattice_constant
, hopping_value ):
sample = tipsi.honeycomb_sheet( width, width
, pbc=True
, latconst=lattice_constant )
sample.finalize_sites()
sample.neighbor_hopping(-hopping_value)
sample = sample.finalize()
# Save results
save_hamiltonian(sample, "Hamiltonian"
+ "." + str(width)
+ ".dat")
save_coordinates(sample, "Coordinates" +
+ "." + str(width)
+ ".dat")
def main():
options = parse_options(sys.argv[1:])
constructor = { 'triangle:zigzag' : triangle_zigzag
, 'triangle:armchair' : triangle_armchair
, 'periodic' : periodic
, 'sierpinski:carpet' : sierpinski_carpet
, 'square' : square
}
sample_type = options[0]
arguments = options[1:]
print "[*] Building sample..."
constructor[sample_type](*arguments)
print "[+] Done."
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 SeukWon Kang ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# generated by wxGlade 0.6.3 on Sun Nov 22 16:00:21 2009
import wx
import random
import os
# begin wxGlade: extracode
# end wxGlade
class dice(object):
def __init__(self, side=0, addsub=0, roll=True):
self.side = max(side, 0)
self.addsub = addsub
self.result = 0
if roll:
self.roll()
def roll(self):
if self.side > 0:
self.result = random.randint(1, self.side) + self.addsub
else:
self.result = self.addsub
return self
def __str__(self):
return "%d=%s" % (self.result, self.getinfostr())
def getinfostr(self):
ad, sd = '', ''
if self.addsub:
ad = "%+d" % self.addsub
if self.side:
sd = "d%d" % self.side
return sd + ad
def copy(self, roll=True):
return dice(self.side, self.addsub, roll)
def challenge(self, targetdc, critcal=[20]):
""" fail:0 success:1 critcal:2"""
rollval = self.result - self.addsub
ishit = self.result >= targetdc
if (ishit or rollval == 20) and rollval in critcal:
crirol = self.copy(True)
return 2 if crirol.challenge(targetdc, []) else 1
else:
return 1 if ishit and rollval != 1 else 0
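# Illustrative check of challenge(): for a d20+5 die whose last roll showed a
# natural 17 (result=22, rollval=17) against targetdc=18, ishit is True and,
# since 17 is not in the default critical list [20], the return value is 1
# (a plain, non-critical success).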
class diceset(object):
def __init__(self):
self.sets = []
def append(self, other, count=1):
if isinstance(other, dice):
for i in range(count):
self.sets.append(other.copy())
else:
pass
return self
def pop(self):
return self.sets.pop()
def roll(self):
for a in self.sets:
a.roll()
return self
def getsum(self):
return sum([a.result for a in self.sets])
def copy(self):
rtn = diceset()
for a in self.sets:
rtn.append(a)
return rtn
def __str__(self):
return '+'.join(a.getinfostr() for a in self.sets)
def getRollStr(self):
self.roll()
dsum = self.getsum()
sdsum = '+'.join([str(a.result) for a in self.sets])
return "%s(%s)" % (dsum, sdsum)
class WxRollDiceFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: WxRollDiceFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
basedirname = os.path.dirname(os.path.abspath(__file__))
self.bitmap_button_2 = wx.BitmapButton(self, 4, wx.Bitmap(
os.path.join(basedirname, "d4_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_3 = wx.BitmapButton(self, 6, wx.Bitmap(
os.path.join(basedirname, "d6_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_4 = wx.BitmapButton(self, 8, wx.Bitmap(
os.path.join(basedirname, "d8_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_5 = wx.BitmapButton(self, 10, wx.Bitmap(
os.path.join(basedirname, "d10_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_6 = wx.BitmapButton(self, 12, wx.Bitmap(
os.path.join(basedirname, "d12_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_7 = wx.BitmapButton(self, 20, wx.Bitmap(
os.path.join(basedirname, "d20_128x128.png"), wx.BITMAP_TYPE_ANY))
self.bitmap_button_8 = wx.BitmapButton(self, 100, wx.Bitmap(
os.path.join(basedirname, "d100_128x128.png"), wx.BITMAP_TYPE_ANY))
self.button_Roll = wx.Button(self, -1, "Roll")
self.spin_ctrl_1 = wx.SpinCtrl(
self, -1, "1", min=1, max=100, style=wx.SP_ARROW_KEYS | wx.SP_WRAP | wx.TE_AUTO_URL | wx.TE_NOHIDESEL)
self.button_add1 = wx.Button(self, -1, "+1")
self.button_sub1 = wx.Button(self, -1, "-1")
self.button_BS = wx.Button(self, -1, "BS")
self.button_reset = wx.Button(self, -1, "Reset")
self.label_result = wx.StaticText(self, -1, "0", style=wx.ALIGN_CENTRE)
self.text_ctrl_1 = wx.TextCtrl(self, -1, "")
self.list_ctrl_1 = wx.ListCtrl(
self, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=4)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=6)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=8)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=10)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=12)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=20)
self.Bind(wx.EVT_BUTTON, self.evt_dice_in, id=100)
self.Bind(wx.EVT_BUTTON, self.evt_roll, self.button_Roll)
self.Bind(wx.EVT_BUTTON, self.evt_add1, self.button_add1)
self.Bind(wx.EVT_BUTTON, self.evt_sub1, self.button_sub1)
self.Bind(wx.EVT_BUTTON, self.evt_bs, self.button_BS)
self.Bind(wx.EVT_BUTTON, self.evt_reset, self.button_reset)
# end wxGlade
self.currentdiceset = diceset()
self.updatedices()
self.list_ctrl_1.InsertColumn(
0, '', format=wx.LIST_FORMAT_LEFT, width=1024)
def __set_properties(self):
# begin wxGlade: WxRollDiceFrame.__set_properties
self.SetTitle("WxDiceRoll")
self.SetSize((984, 964))
self.bitmap_button_2.SetSize(self.bitmap_button_2.GetBestSize())
self.bitmap_button_3.SetSize(self.bitmap_button_3.GetBestSize())
self.bitmap_button_4.SetSize(self.bitmap_button_4.GetBestSize())
self.bitmap_button_5.SetSize(self.bitmap_button_5.GetBestSize())
self.bitmap_button_6.SetSize(self.bitmap_button_6.GetBestSize())
self.bitmap_button_7.SetSize(self.bitmap_button_7.GetBestSize())
self.bitmap_button_8.SetSize(self.bitmap_button_8.GetBestSize())
self.button_Roll.SetFont(
wx.Font(24, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.spin_ctrl_1.SetFont(
wx.Font(24, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.button_add1.SetFont(
wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.button_sub1.SetFont(
wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.button_BS.SetFont(
wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.button_reset.SetFont(
wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.label_result.SetFont(
wx.Font(48, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.list_ctrl_1.SetMinSize((100, 80))
# end wxGlade
def __do_layout(self):
# begin wxGlade: WxRollDiceFrame.__do_layout
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(self.bitmap_button_2, 0, 0, 0)
sizer_1.Add(self.bitmap_button_3, 0, 0, 0)
sizer_1.Add(self.bitmap_button_4, 0, 0, 0)
sizer_1.Add(self.bitmap_button_5, 0, 0, 0)
sizer_1.Add(self.bitmap_button_6, 0, 0, 0)
sizer_1.Add(self.bitmap_button_7, 0, 0, 0)
sizer_1.Add(self.bitmap_button_8, 0, 0, 0)
sizer_2.Add(sizer_1, 0, wx.EXPAND, 0)
sizer_4.Add(self.button_Roll, 2, wx.EXPAND, 0)
sizer_4.Add(self.spin_ctrl_1, 1, wx.EXPAND, 0)
sizer_4.Add(self.button_add1, 1, wx.EXPAND, 0)
sizer_4.Add(self.button_sub1, 1, wx.EXPAND, 0)
sizer_4.Add(self.button_BS, 1, wx.EXPAND, 0)
sizer_4.Add(self.button_reset, 1, wx.EXPAND, 0)
sizer_3.Add(sizer_4, 1, wx.EXPAND, 0)
sizer_5.Add(self.label_result, 0, wx.EXPAND, 0)
sizer_5.Add(self.text_ctrl_1, 0, wx.EXPAND, 0)
sizer_5.Add(self.list_ctrl_1, 1, wx.EXPAND, 0)
sizer_3.Add(sizer_5, 3, wx.EXPAND, 0)
sizer_2.Add(sizer_3, 1, wx.EXPAND, 0)
self.SetSizer(sizer_2)
self.Layout()
# end wxGlade
def updatedices(self):
self.text_ctrl_1.ChangeValue(str(self.currentdiceset))
def evt_add1(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset.append(dice(0, self.spin_ctrl_1.GetValue()))
self.updatedices()
def evt_sub1(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset.append(dice(0, -1 * self.spin_ctrl_1.GetValue()))
self.updatedices()
def evt_bs(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset.pop()
self.updatedices()
def evt_reset(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset = diceset()
self.updatedices()
def evt_roll(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
rollstr = self.currentdiceset.getRollStr()
printstr = "%s = %s" % (rollstr, self.currentdiceset)
self.list_ctrl_1.InsertStringItem(0, printstr)
self.label_result.SetLabel(rollstr)
def evt_dice_n(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset.append(
dice(event.GetId()), self.spin_ctrl_1.GetValue())
self.updatedices()
def evt_dice_in(self, event): # wxGlade: WxRollDiceFrame.<event_handler>
self.currentdiceset.append(
dice(event.GetId()), self.spin_ctrl_1.GetValue())
self.updatedices()
# end of class WxRollDiceFrame
class grolldiceapp(wx.PySimpleApp):
"""
"""
if __name__ == "__main__":
app = grolldiceapp(0)
wx.InitAllImageHandlers()
frame_1 = WxRollDiceFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
|
|
"""Prepare read inputs (fastq, gzipped fastq and BAM) for parallel NGS alignment.
"""
import collections
import copy
import os
import shutil
import subprocess
import toolz as tz
from bcbio import bam, utils
from bcbio.bam import cram
from bcbio.log import logger
from bcbio.distributed import objectstore
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, tools
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def create_inputs(data):
"""Index input reads and prepare groups of reads to process concurrently.
Allows parallelization of alignment beyond processors available on a single
machine. Uses bgzip and grabix to prepare an indexed fastq file.
"""
aligner = tz.get_in(("config", "algorithm", "aligner"), data)
# CRAM files must be converted to bgzipped fastq, unless not aligning.
# Also need to prep and download remote files.
if not ("files" in data and data["files"] and aligner and (_is_cram_input(data["files"]) or
objectstore.is_remote(data["files"][0]))):
# skip indexing on samples without input files or not doing alignment
# skip if we're not BAM and not doing alignment splitting
if ("files" not in data or not data["files"] or data["files"][0] is None or not aligner
or _no_index_needed(data)):
return [[data]]
ready_files = _prep_grabix_indexes(data["files"], data["dirs"], data)
data["files"] = ready_files
# bgzip preparation takes care of converting illumina into sanger format
data["config"]["algorithm"]["quality_format"] = "standard"
if tz.get_in(["config", "algorithm", "align_split_size"], data):
splits = _find_read_splits(ready_files[0], data["config"]["algorithm"]["align_split_size"])
else:
splits = [None]
if len(splits) == 1:
return [[data]]
else:
out = []
for split in splits:
cur_data = copy.deepcopy(data)
cur_data["align_split"] = list(split)
out.append([cur_data])
return out
def _no_index_needed(data):
return (not data["files"][0].endswith(".bam")
and data["config"]["algorithm"].get("align_split_size") is None)
def split_namedpipe_cl(in_file, data):
"""Create a commandline suitable for use as a named pipe with reads in a given region.
"""
grabix = config_utils.get_program("grabix", data["config"])
start, end = data["align_split"]
return "<({grabix} grab {in_file} {start} {end})".format(**locals())
def fastq_convert_pipe_cl(in_file, data):
"""Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger.
Uses seqtk: https://github.com/lh3/seqtk
"""
seqtk = config_utils.get_program("seqtk", data["config"])
in_file = objectstore.cl_input(in_file)
return "<({seqtk} seq -Q64 -V {in_file})".format(**locals())
# ## configuration
def parallel_multiplier(items):
"""Determine if we will be parallelizing items during processing.
"""
multiplier = 1
for data in (x[0] for x in items):
if (tz.get_in(["config", "algorithm", "align_split_size"], data) or
tz.get_in(["algorithm", "align_split_size"], data)):
multiplier += 50
return multiplier
# ## merge
def setup_combine(final_file, data):
"""Setup the data and outputs to allow merging data back together.
"""
align_dir = os.path.dirname(final_file)
base, ext = os.path.splitext(os.path.basename(final_file))
start, end = data["align_split"]
out_file = os.path.join(utils.safe_makedir(os.path.join(align_dir, "split")),
"%s-%s_%s%s" % (base, start, end, ext))
data["combine"] = {"work_bam": {"out": final_file, "extras": []}}
return out_file, data
def merge_split_alignments(samples, run_parallel):
"""Manage merging split alignments back into a final working BAM file.
Perform de-duplication on the final merged file.
"""
ready = []
file_key = "work_bam"
to_merge = collections.defaultdict(list)
for data in (xs[0] for xs in samples):
if data.get("combine"):
out_key = tz.get_in(["combine", file_key, "out"], data)
if not out_key:
out_key = data["rgnames"]["lane"]
to_merge[out_key].append(data)
else:
ready.append([data])
ready_merge = []
for mgroup in to_merge.itervalues():
cur_data = mgroup[0]
del cur_data["align_split"]
for x in mgroup[1:]:
cur_data["combine"][file_key]["extras"].append(x[file_key])
ready_merge.append([cur_data])
merged = run_parallel("delayed_bam_merge", ready_merge)
# Add stable 'align_bam' target to use for retrieving raw alignment
out = []
for data in [x[0] for x in merged + ready]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
out.append([data])
return out
# ## determine file sections
def _find_read_splits(in_file, split_size):
"""Determine sections of fastq files to process in splits.
Assumes a 4 line order to input files (name, read, name, quality).
grabix is 1-based inclusive, so return coordinates in that format.
"""
gbi_file = in_file + ".gbi"
with open(gbi_file) as in_handle:
in_handle.next() # throw away
num_lines = int(in_handle.next().strip())
assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
split_lines = split_size * 4
chunks = []
last = 1
for chunki in range(num_lines // split_lines + min(1, num_lines % split_lines)):
new = last + split_lines - 1
chunks.append((last, min(new, num_lines)))
last = new + 1
return chunks
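# Worked example (hypothetical numbers): a fastq with 1,000,000 reads has
# num_lines == 4,000,000. With split_size=400000, split_lines == 1,600,000 and
# the returned 1-based inclusive grabix ranges are:
#   [(1, 1600000), (1600001, 3200000), (3200001, 4000000)]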
# ## bgzip and grabix
def _is_bam_input(in_files):
return in_files and in_files[0].endswith(".bam") and (len(in_files) == 1 or in_files[1] is None)
def _is_cram_input(in_files):
return in_files and in_files[0].endswith(".cram") and (len(in_files) == 1 or in_files[1] is None)
def _prep_grabix_indexes(in_files, dirs, data):
if _is_bam_input(in_files):
out = _bgzip_from_bam(in_files[0], dirs, data["config"])
elif _is_cram_input(in_files):
out = _bgzip_from_cram(in_files[0], dirs, data)
else:
out = run_multicore(_bgzip_from_fastq,
[[{"in_file": x, "dirs": dirs, "config": data["config"]}] for x in in_files if x],
data["config"])
items = [[{"bgzip_file": x, "config": copy.deepcopy(data["config"])}] for x in out if x]
run_multicore(_grabix_index, items, data["config"])
return out
def _bgzip_from_cram(cram_file, dirs, data):
"""Create bgzipped fastq files from an input CRAM file in regions of interest.
Returns a list with a single file for single-end CRAM files, or two
files for paired-end input.
"""
import pybedtools
region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
if tz.get_in(["config", "algorithm", "coverage_interval"], data)
in ["regional", "exome", "amplicon"]
else None)
if region_file:
regions = ["%s:%s-%s" % tuple(r[:3]) for r in pybedtools.BedTool(region_file)]
else:
regions = [None]
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
out_s, out_p1, out_p2 = [os.path.join(work_dir, "%s-%s.fq.gz" %
(utils.splitext_plus(os.path.basename(cram_file))[0], fext))
for fext in ["s1", "p1", "p2"]]
if (not utils.file_exists(out_s) and
(not utils.file_exists(out_p1) or not utils.file_exists(out_p2))):
cram.index(cram_file, data["config"])
fastqs, part_dir = _cram_to_fastq_regions(regions, cram_file, dirs, data)
if len(fastqs[0]) == 1:
with file_transaction(data, out_s) as tx_out_file:
_merge_and_bgzip([xs[0] for xs in fastqs], tx_out_file, out_s)
else:
for i, out_file in enumerate([out_p1, out_p2]):
if not utils.file_exists(out_file):
ext = "/%s" % (i + 1)
with file_transaction(data, out_file) as tx_out_file:
_merge_and_bgzip([xs[i] for xs in fastqs], tx_out_file, out_file, ext)
shutil.rmtree(part_dir)
if utils.file_exists(out_p1):
return [out_p1, out_p2]
else:
assert utils.file_exists(out_s)
return [out_s]
def _bgzip_from_cram_sambamba(cram_file, dirs, data):
"""Use sambamba to extract from CRAM via regions.
"""
raise NotImplementedError("sambamba doesn't yet support retrieval from CRAM by BED file")
region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
if tz.get_in(["config", "algorithm", "coverage_interval"], data) in ["regional", "exome"]
else None)
base_name = utils.splitext_plus(os.path.basename(cram_file))[0]
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep",
"%s-parts" % base_name))
f1, f2, o1, o2, si = [os.path.join(work_dir, "%s.fq" % x) for x in ["match1", "match2", "unmatch1", "unmatch2",
"single"]]
ref_file = dd.get_ref_file(data)
region = "-L %s" % region_file if region_file else ""
cmd = ("sambamba view -f bam -l 0 -C {cram_file} -T {ref_file} {region} | "
"bamtofastq F={f1} F2={f2} S={si} O={o1} O2={o2}")
do.run(cmd.format(**locals()), "Convert CRAM to fastq in regions")
def _merge_and_bgzip(orig_files, out_file, base_file, ext=""):
"""Merge a group of gzipped input files into a final bgzipped output.
Also handles providing unique names for each input file to avoid
collisions on multi-region output. Handles renaming with awk magic from:
https://www.biostars.org/p/68477/
"""
assert out_file.endswith(".gz")
full_file = out_file.replace(".gz", "")
run_file = "%s-merge.bash" % utils.splitext_plus(base_file)[0]
cmds = ["set -e\n"]
for i, fname in enumerate(orig_files):
cmd = ("""zcat %s | awk '{print (NR%%4 == 1) ? "@%s_" ++i "%s" : $0}' >> %s\n"""
% (fname, i, ext, full_file))
cmds.append(cmd)
cmds.append("bgzip -f %s\n" % full_file)
with open(run_file, "w") as out_handle:
out_handle.write("".join(cmds))
do.run([do.find_bash(), run_file], "Rename, merge and bgzip CRAM fastq output")
assert os.path.exists(out_file) and not _is_gzip_empty(out_file)
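# Sketch of the renaming performed above (read names are hypothetical): records
# from the i-th input file get rewritten so names stay unique after merging,
# e.g. headers in the first file (i=0) become "@0_1/1", "@0_2/1", ... when
# called with ext="/1", and "@1_1/1", "@1_2/1", ... for the second file.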
def _cram_to_fastq_regions(regions, cram_file, dirs, data):
"""Convert CRAM files to fastq, potentially within sub regions.
Returns multiple fastq files that can be merged back together.
"""
base_name = utils.splitext_plus(os.path.basename(cram_file))[0]
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep",
"%s-parts" % base_name))
fnames = run_multicore(_cram_to_fastq_region,
[(cram_file, work_dir, base_name, region, data) for region in regions],
data["config"])
# check if we have paired or single end data
if any(not _is_gzip_empty(p1) for p1, p2, s in fnames):
out = [[p1, p2] for p1, p2, s in fnames]
else:
out = [[s] for p1, p2, s in fnames]
return out, work_dir
@utils.map_wrap
@zeromq_aware_logging
def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
"""Convert CRAM to fastq in a specified region.
"""
ref_file = tz.get_in(["reference", "fasta", "base"], data)
resources = config_utils.get_resources("bamtofastq", data["config"])
cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
out_s, out_p1, out_p2, out_o1, out_o2 = [os.path.join(work_dir, "%s%s-%s.fq.gz" %
(base_name, rext, fext))
for fext in ["s1", "p1", "p2", "o1", "o2"]]
if not utils.file_exists(out_p1):
with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
(tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
cram_file = objectstore.cl_input(cram_file)
sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
"gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
"F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
"reference={ref_file}")
if region:
cmd += " ranges='{region}'"
do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
return [[out_p1, out_p2, out_s]]
def _is_gzip_empty(fname):
count = subprocess.check_output("zcat %s | head -1 | wc -l" % fname, shell=True,
stderr=open("/dev/null", "w"))
return int(count) < 1
def _bgzip_from_bam(bam_file, dirs, config, is_retry=False, output_infix=''):
"""Create bgzipped fastq files from an input BAM file.
"""
# tools
bamtofastq = config_utils.get_program("bamtofastq", config)
resources = config_utils.get_resources("bamtofastq", config)
cores = config["algorithm"].get("num_cores", 1)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
bgzip = tools.get_bgzip_cmd(config, is_retry)
# files
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
out_file_1 = os.path.join(work_dir, "%s%s-1.fq.gz" % (os.path.splitext(os.path.basename(bam_file))[0], output_infix))
if bam.is_paired(bam_file):
out_file_2 = out_file_1.replace("-1.fq.gz", "-2.fq.gz")
else:
out_file_2 = None
needs_retry = False
if is_retry or not utils.file_exists(out_file_1):
with file_transaction(config, out_file_1) as tx_out_file:
for f in [tx_out_file, out_file_1, out_file_2]:
if f and os.path.exists(f):
os.remove(f)
fq1_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, tx_out_file)
sortprefix = "%s-sort" % os.path.splitext(tx_out_file)[0]
if bam.is_paired(bam_file):
fq2_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, out_file_2)
out_str = ("F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null "
"O2=/dev/null collate=1 colsbs={max_mem}")
else:
out_str = "S=>({fq1_bgzip_cmd})"
bam_file = objectstore.cl_input(bam_file)
cmd = "{bamtofastq} filename={bam_file} T={sortprefix} " + out_str
try:
do.run(cmd.format(**locals()), "BAM to bgzipped fastq",
checks=[do.file_reasonable_size(tx_out_file, bam_file)],
log_error=False)
except subprocess.CalledProcessError, msg:
if not is_retry and "deflate failed" in str(msg):
logger.info("bamtofastq deflate IO failure preparing %s. Retrying with single core."
% (bam_file))
needs_retry = True
else:
logger.exception("bamtofastq failed converting %s to fastq" % bam_file)
raise
if needs_retry:
return _bgzip_from_bam(bam_file, dirs, config, is_retry=True)
else:
return [x for x in [out_file_1, out_file_2] if x is not None]
@utils.map_wrap
@zeromq_aware_logging
def _grabix_index(data):
in_file = data["bgzip_file"]
config = data["config"]
grabix = config_utils.get_program("grabix", config)
gbi_file = in_file + ".gbi"
if tz.get_in(["algorithm", "align_split_size"], config):
if not utils.file_exists(gbi_file) or _is_partial_index(gbi_file):
do.run([grabix, "index", in_file], "Index input with grabix: %s" % os.path.basename(in_file))
return [gbi_file]
def _is_partial_index(gbi_file):
"""Check for truncated output since grabix doesn't write to a transactional directory.
"""
with open(gbi_file) as in_handle:
for i, _ in enumerate(in_handle):
if i > 2:
return False
return True
@utils.map_wrap
@zeromq_aware_logging
def _bgzip_from_fastq(data):
"""Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
"""
in_file = data["in_file"]
config = data["config"]
grabix = config_utils.get_program("grabix", config)
needs_convert = config["algorithm"].get("quality_format", "").lower() == "illumina"
if in_file.endswith(".gz") and not objectstore.is_remote(in_file):
needs_bgzip, needs_gunzip = _check_gzipped_input(in_file, grabix, needs_convert)
elif objectstore.is_remote(in_file) and not tz.get_in(["algorithm", "align_split_size"], config):
needs_bgzip, needs_gunzip = False, False
else:
needs_bgzip, needs_gunzip = True, False
if needs_bgzip or needs_gunzip or needs_convert or objectstore.is_remote(in_file):
out_file = _bgzip_file(in_file, data["dirs"], config, needs_bgzip, needs_gunzip,
needs_convert)
else:
out_file = in_file
return [out_file]
def _bgzip_file(in_file, dirs, config, needs_bgzip, needs_gunzip, needs_convert):
"""Handle bgzip of input file, potentially gunzipping an existing file.
"""
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
out_file = os.path.join(work_dir, os.path.basename(in_file) +
(".gz" if not in_file.endswith(".gz") else ""))
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bgzip = tools.get_bgzip_cmd(config)
is_remote = objectstore.is_remote(in_file)
in_file = objectstore.cl_input(in_file, unpack=needs_gunzip or needs_convert or needs_bgzip)
if needs_convert:
in_file = fastq_convert_pipe_cl(in_file, {"config": config})
if needs_gunzip and not needs_convert:
gunzip_cmd = "gunzip -c {in_file} |".format(**locals())
bgzip_in = "/dev/stdin"
else:
gunzip_cmd = ""
bgzip_in = in_file
if needs_bgzip:
do.run("{gunzip_cmd} {bgzip} -c {bgzip_in} > {tx_out_file}".format(**locals()),
"bgzip input file")
elif is_remote:
bgzip = "| bgzip -c" if needs_convert else ""
do.run("cat {in_file} {bgzip} > {tx_out_file}".format(**locals()), "Get remote input")
else:
raise ValueError("Unexpected inputs: %s %s %s %s" % (in_file, needs_bgzip,
needs_gunzip, needs_convert))
return out_file
def _check_gzipped_input(in_file, grabix, needs_convert):
"""Determine if a gzipped input file is blocked gzip or standard.
"""
is_bgzip = subprocess.check_output([grabix, "check", in_file])
if is_bgzip.strip() == "yes" and not needs_convert:
return False, False
else:
return True, True
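# Return value sketch for _check_gzipped_input: `grabix check` prints "yes" for
# files that are already bgzipped, so (False, False) means the input can be
# reused as-is, while (True, True) means it should be gunzipped and
# re-compressed with bgzip (also forced whenever Illumina quality conversion
# is needed).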
|
|
# coding=utf-8
"""
The Collector class is a base class for all metric collectors.
"""
import os
import socket
import platform
import logging
import configobj
import time
import re
import subprocess
from diamond.metric import Metric
from diamond.utils.config import load_config
from error import DiamondException
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
def get_hostname(config, method=None):
"""
Returns a hostname as configured by the user
"""
method = method or config.get('hostname_method', 'smart')
# case insensitive method
method = method.lower()
if 'hostname' in config and method != 'shell':
return config['hostname']
if method in get_hostname.cached_results:
return get_hostname.cached_results[method]
if method == 'shell':
if 'hostname' not in config:
raise DiamondException(
"hostname must be set to a shell command for"
" hostname_method=shell")
else:
proc = subprocess.Popen(config['hostname'],
shell=True,
stdout=subprocess.PIPE)
hostname = proc.communicate()[0].strip()
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode,
config['hostname'])
get_hostname.cached_results[method] = hostname
return hostname
if method == 'smart':
hostname = get_hostname(config, 'fqdn_short')
if hostname != 'localhost':
get_hostname.cached_results[method] = hostname
return hostname
hostname = get_hostname(config, 'hostname_short')
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn_short':
hostname = socket.getfqdn().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn':
hostname = socket.getfqdn().replace('.', '_')
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn_rev':
hostname = socket.getfqdn().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_short':
hostname = os.uname()[1].split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_rev':
hostname = os.uname()[1].split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname':
hostname = socket.gethostname()
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_short':
hostname = socket.gethostname().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_rev':
hostname = socket.gethostname().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'none':
get_hostname.cached_results[method] = None
return None
raise NotImplementedError(config['hostname_method'])
get_hostname.cached_results = {}
def str_to_bool(value):
"""
Converts truthy/falsey strings to a bool
Empty strings are false
"""
if isinstance(value, basestring):
value = value.strip().lower()
if value in ['true', 't', 'yes', 'y']:
return True
elif value in ['false', 'f', 'no', 'n', '']:
return False
else:
raise NotImplementedError("Unknown bool %s" % value)
return value
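# Example (illustrative): str_to_bool(" Yes ") -> True, str_to_bool("n") -> False,
# str_to_bool("") -> False; non-string inputs are returned unchanged.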
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config=None, handlers=[], name=None, configfile=None):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
if name is None:
self.name = self.__class__.__name__
else:
self.name = name
self.handlers = handlers
self.last_values = {}
self.configfile = None
self.load_config(configfile, config)
def load_config(self, configfile=None, override_config=None):
"""
Process a configfile, or reload if previously given one.
"""
self.config = configobj.ConfigObj()
# Load in the collector's defaults
if self.get_default_config() is not None:
self.config.merge(self.get_default_config())
if configfile is not None:
self.configfile = os.path.abspath(configfile)
if self.configfile is not None:
config = load_config(self.configfile)
if 'collectors' in config:
if 'default' in config['collectors']:
self.config.merge(config['collectors']['default'])
if self.name in config['collectors']:
self.config.merge(config['collectors'][self.name])
if override_config is not None:
if 'collectors' in override_config:
if 'default' in override_config['collectors']:
self.config.merge(override_config['collectors']['default'])
if self.name in override_config['collectors']:
self.config.merge(override_config['collectors'][self.name])
self.process_config()
def process_config(self):
"""
Intended to put any code that should be run after any config reload
event
"""
if 'byte_unit' in self.config:
if isinstance(self.config['byte_unit'], basestring):
self.config['byte_unit'] = self.config['byte_unit'].split()
if 'enabled' in self.config:
self.config['enabled'] = str_to_bool(self.config['enabled'])
if 'measure_collector_time' in self.config:
self.config['measure_collector_time'] = str_to_bool(
self.config['measure_collector_time'])
# Raise an error if both whitelist and blacklist are specified
if (self.config.get('metrics_whitelist', None)
and self.config.get('metrics_blacklist', None)):
raise DiamondException(
'Both metrics_whitelist and metrics_blacklist specified ' +
'in file %s' % self.configfile)
if self.config.get('metrics_whitelist', None):
self.config['metrics_whitelist'] = re.compile(
self.config['metrics_whitelist'])
elif self.config.get('metrics_blacklist', None):
self.config['metrics_blacklist'] = re.compile(
self.config['metrics_blacklist'])
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this collector
"""
return {
'enabled': 'Enable collecting these metrics',
'byte_unit': 'Default numeric output(s)',
'measure_collector_time': 'Collect the collector run time in ms',
'metrics_whitelist': 'Regex to match metrics to transmit. ' +
'Mutually exclusive with metrics_blacklist',
'metrics_blacklist': 'Regex to match metrics to block. ' +
'Mutually exclusive with metrics_whitelist',
}
def get_default_config(self):
"""
Return the default config for the collector
"""
return {
# Defaults options for all Collectors
# Uncomment and set to hardcode a hostname for the collector path
# Keep in mind, periods are separators in graphite
# 'hostname': 'my_custom_hostname',
# If you prefer to just use a different way of calculating the
# hostname
# Uncomment and set this to one of these values:
# fqdn_short = Default. Similar to hostname -s
# fqdn = hostname output
# fqdn_rev = hostname in reverse (com.example.www)
# uname_short = Similar to uname -n, but only the first part
# uname_rev = uname -n in reverse (com.example.www)
# 'hostname_method': 'fqdn_short',
# All collectors are disabled by default
'enabled': False,
# Path Prefix
'path_prefix': 'servers',
# Path Prefix for Virtual Machine metrics
'instance_prefix': 'instances',
# Path Suffix
'path_suffix': '',
# Default Poll Interval (seconds)
'interval': 300,
# Default Event TTL (interval multiplier)
'ttl_multiplier': 2,
# Default numeric output
'byte_unit': 'byte',
# Collect the collector run time in ms
'measure_collector_time': False,
# Whitelist of metrics to let through
'metrics_whitelist': None,
# Blacklist of metrics to block
'metrics_blacklist': None,
}
def get_metric_path(self, name, instance=None):
"""
Get metric path.
Instance indicates that this is a metric for a
virtual machine and should have a different
root prefix.
"""
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if instance is not None:
if 'instance_prefix' in self.config:
prefix = self.config['instance_prefix']
else:
prefix = 'instances'
if path == '.':
return '.'.join([prefix, instance, name])
else:
return '.'.join([prefix, instance, path, name])
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
else:
suffix = None
hostname = get_hostname(self.config)
if hostname is not None:
if prefix:
prefix = ".".join((prefix, hostname))
else:
prefix = hostname
# if there is a suffix, add after the hostname
if suffix:
prefix = '.'.join((prefix, suffix))
if path == '.':
return '.'.join([prefix, name])
else:
return '.'.join([prefix, path, name])
def get_hostname(self):
return get_hostname(self.config)
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, raw_value=None, precision=0,
metric_type='GAUGE', instance=None):
"""
Publish a metric with the given name
"""
# Check whitelist/blacklist
if self.config['metrics_whitelist']:
if not self.config['metrics_whitelist'].match(name):
return
elif self.config['metrics_blacklist']:
if self.config['metrics_blacklist'].match(name):
return
# Get metric Path
path = self.get_metric_path(name, instance=instance)
# Get metric TTL
ttl = float(self.config['interval']) * float(
self.config['ttl_multiplier'])
# Create Metric
try:
metric = Metric(path, value, raw_value=raw_value, timestamp=None,
precision=precision, host=self.get_hostname(),
metric_type=metric_type, ttl=ttl)
except DiamondException:
self.log.error(('Error when creating new Metric: path=%r, '
'value=%r'), path, value)
raise
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for handler in self.handlers:
handler._process(metric)
def publish_gauge(self, name, value, precision=0, instance=None):
return self.publish(name, value, precision=precision,
metric_type='GAUGE', instance=instance)
def publish_counter(self, name, value, precision=0, max_value=0,
time_delta=True, interval=None, allow_negative=False,
instance=None):
raw_value = value
value = self.derivative(name, value, max_value=max_value,
time_delta=time_delta, interval=interval,
allow_negative=allow_negative,
instance=instance)
return self.publish(name, value, raw_value=raw_value,
precision=precision, metric_type='COUNTER',
instance=instance)
def derivative(self, name, new, max_value=0,
time_delta=True, interval=None,
allow_negative=False, instance=None):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name, instance=instance)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
derivative_x = new - old
# If we pass in an interval, use it rather than the configured one
if interval is None:
interval = int(self.config['interval'])
# Get Change in Y (time)
if time_delta:
derivative_y = interval
else:
derivative_y = 1
result = float(derivative_x) / float(derivative_y)
if result < 0 and not allow_negative:
result = 0
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
def _run(self):
"""
Run the collector unless it's already running
"""
try:
start_time = time.time()
# Collect Data
self.collect()
end_time = time.time()
collector_time = int((end_time - start_time) * 1000)
self.log.debug('Collection took %s ms', collector_time)
if 'measure_collector_time' in self.config:
if self.config['measure_collector_time']:
metric_name = 'collector_time_ms'
metric_value = collector_time
self.publish(metric_name, metric_value)
finally:
# After collector run, invoke a flush
# method on each handler.
for handler in self.handlers:
handler._flush()
def find_binary(self, binary):
"""
Scan and return the first path to a binary that we can find
"""
if os.path.exists(binary):
return binary
# Extract out the filename if we were given a full path
binary_name = os.path.basename(binary)
# Gather $PATH
search_paths = os.environ['PATH'].split(':')
# Extra paths to scan...
default_paths = [
'/usr/bin',
'/bin',
'/usr/local/bin',
'/usr/sbin',
'/sbin',
'/usr/local/sbin',
]
for path in default_paths:
if path not in search_paths:
search_paths.append(path)
for path in search_paths:
if os.path.isdir(path):
filename = os.path.join(path, binary_name)
if os.path.exists(filename):
return filename
return binary
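# Illustrative sketch only -- "ExampleCollector" and its metric names are
# hypothetical, not part of this module. A collector subclass implements
# collect() and reports values through publish()/publish_counter(); with the
# defaults above, a gauge published here lands at a path like
# "servers.<hostname>.ExampleCollector.example.gauge".
class ExampleCollector(Collector):
    def collect(self):
        # instantaneous value -> GAUGE metric
        self.publish('example.gauge', 42)
        # monotonically increasing counter -> rate computed via derivative()
        self.publish_counter('example.requests', 12345, max_value=MAX_COUNTER)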
class ProcessCollector(Collector):
"""
Collector with helpers for handling running commands with/without sudo
"""
def get_default_config_help(self):
config_help = super(ProcessCollector, self).get_default_config_help()
config_help.update({
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
def run_command(self, args):
if 'bin' not in self.config:
raise Exception('config does not have any binary configured')
if not os.access(self.config['bin'], os.X_OK):
raise Exception('%s is not executable' % self.config['bin'])
try:
command = args
command.insert(0, self.config['bin'])
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()
except OSError:
self.log.exception("Unable to run %s", command)
return None
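# Usage sketch (hypothetical binary and config): a subclass that sets
# self.config['bin'] (e.g. '/usr/sbin/ntpq') can run it with
#   stdout, stderr = self.run_command(['-np'])
# and the command is prefixed with sudo_cmd when use_sudo is true.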
|
|
#!/usr/bin/env python
# We want a way to generate non-colliding 'pyxl<num>' ids for elements, so we're
# using a non-cryptographically secure random number generator. We want it to be
# insecure because these aren't being used for anything cryptographic and it's
# much faster (2x). We're also not using NumPy (which is even faster) because
# it's a difficult dependency to fulfill purely to generate random numbers.
import random
import sys
from pyxl.utils import escape
class PyxlException(Exception):
pass
class x_base_metaclass(type):
def __init__(self, name, parents, attrs):
super(x_base_metaclass, self).__init__(name, parents, attrs)
x_base_parents = [parent for parent in parents if hasattr(parent, '__attrs__')]
parent_attrs = x_base_parents[0].__attrs__ if len(x_base_parents) else {}
self_attrs = self.__dict__.get('__attrs__', {})
# Don't allow '_' in attr names
for attr_name in self_attrs:
assert '_' not in attr_name, (
"%s: '_' not allowed in attr names, use '-' instead" % attr_name)
combined_attrs = dict(parent_attrs)
combined_attrs.update(self_attrs)
setattr(self, '__attrs__', combined_attrs)
setattr(self, '__tag__', name[2:])
class x_base(object):
__metaclass__ = x_base_metaclass
__attrs__ = {
# HTML attributes
'accesskey': unicode,
'class': unicode,
'dir': unicode,
'id': unicode,
'lang': unicode,
'maxlength': unicode,
'role': unicode,
'style': unicode,
'tabindex': int,
'title': unicode,
'xml:lang': unicode,
# Microdata HTML attributes
'itemtype': unicode,
'itemscope': unicode,
'itemprop': unicode,
'itemid': unicode,
'itemref': unicode,
# JS attributes
'onabort': unicode,
'onblur': unicode,
'onchange': unicode,
'onclick': unicode,
'ondblclick': unicode,
'onerror': unicode,
'onfocus': unicode,
'onkeydown': unicode,
'onkeypress': unicode,
'onkeyup': unicode,
'onload': unicode,
'onmousedown': unicode,
'onmouseenter': unicode,
'onmouseleave': unicode,
'onmousemove': unicode,
'onmouseout': unicode,
'onmouseover': unicode,
'onmouseup': unicode,
'onreset': unicode,
'onresize': unicode,
'onselect': unicode,
'onsubmit': unicode,
'onunload': unicode,
}
def __init__(self, **kwargs):
self.__attributes__ = {}
self.__children__ = []
for name, value in kwargs.iteritems():
self.set_attr(x_base._fix_attribute_name(name), value)
def __call__(self, *children):
self.append_children(children)
return self
def get_id(self):
eid = self.attr('id')
if not eid:
eid = 'pyxl%d' % random.randint(0, sys.maxint)
self.set_attr('id', eid)
return eid
def children(self, selector=None, exclude=False):
if not selector:
return self.__children__
# filter by class
if selector[0] == '.':
select = lambda x: selector[1:] in x.get_class()
# filter by id
elif selector[0] == '#':
select = lambda x: selector[1:] == x.get_id()
# filter by tag name
else:
select = lambda x: x.__class__.__name__ == ('x_%s' % selector)
if exclude:
func = lambda x: not select(x)
else:
func = select
return filter(func, self.__children__)
def append(self, child):
if type(child) in (list, tuple) or hasattr(child, '__iter__'):
self.__children__.extend(c for c in child if c is not None and c is not False)
elif child is not None and child is not False:
self.__children__.append(child)
def prepend(self, child):
if child is not None and child is not False:
self.__children__.insert(0, child)
def __getattr__(self, name):
return self.attr(name.replace('_', '-'))
def attr(self, name, default=None):
# this check is fairly expensive (~8% of cost)
if not self.allows_attribute(name):
raise PyxlException('<%s> has no attr named "%s"' % (self.__tag__, name))
value = self.__attributes__.get(name)
if value is not None:
return value
attr_type = self.__attrs__.get(name, unicode)
if type(attr_type) == list:
if not attr_type:
raise PyxlException('Invalid attribute definition')
if None in attr_type[1:]:
raise PyxlException('None must be the first, default value')
return attr_type[0]
return default
def transfer_attributes(self, element):
for name, value in self.__attributes__.iteritems():
if element.allows_attribute(name) and element.attr(name) is None:
element.set_attr(name, value)
def set_attr(self, name, value):
# this check is fairly expensive (~8% of cost)
if not self.allows_attribute(name):
raise PyxlException('<%s> has no attr named "%s"' % (self.__tag__, name))
if value is not None:
attr_type = self.__attrs__.get(name, unicode)
if type(attr_type) == list:
# support for enum values in pyxl attributes
values_enum = attr_type
assert values_enum, 'Invalid attribute definition'
if value not in values_enum:
msg = '%s: %s: incorrect value "%s" for "%s". Expecting enum value %s' % (
self.__tag__, self.__class__.__name__, value, name, values_enum)
raise PyxlException(msg)
else:
try:
# Validate type of attr and cast to correct type if possible
value = value if isinstance(value, attr_type) else attr_type(value)
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
msg = '%s: %s: incorrect type for "%s". expected %s, got %s' % (
self.__tag__, self.__class__.__name__, name, attr_type, type(value))
exception = PyxlException(msg)
raise exception, None, exc_tb
self.__attributes__[name] = value
elif name in self.__attributes__:
del self.__attributes__[name]
def get_class(self):
return self.attr('class', '')
def add_class(self, xclass):
if not xclass: return
current_class = self.attr('class')
if current_class: current_class += ' ' + xclass
else: current_class = xclass
self.set_attr('class', current_class)
def append_children(self, children):
for child in children:
self.append(child)
def attributes(self):
return self.__attributes__
def set_attributes(self, attrs_dict):
for name, value in attrs_dict.iteritems():
self.set_attr(name, value)
def allows_attribute(self, name):
return (name in self.__attrs__ or name.startswith('data-') or name.startswith('aria-'))
def to_string(self):
l = []
self._to_list(l)
return u''.join(l)
def _to_list(self, l):
raise NotImplementedError()
def __str__(self):
return self.to_string()
def __unicode__(self):
return self.to_string()
@staticmethod
def _render_child_to_list(child, l):
if isinstance(child, x_base): child._to_list(l)
elif child is not None: l.append(escape(child))
@staticmethod
def _fix_attribute_name(name):
if name == 'xclass': return 'class'
if name == 'xfor': return 'for'
return name.replace('_', '-').replace('COLON', ':')
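# Illustrative sketch (hypothetical tag, not part of pyxl itself): list-valued
# entries in __attrs__ act as enums, with the first entry returned by attr() as
# the default, while other types are used to validate/cast values in set_attr().
class x_example_widget(x_base):
    __attrs__ = {
        'size': ['small', 'medium', 'large'],  # enum attr; 'small' is the default
        'count': int,                          # typed attr; values are cast to int
    }
    def _to_list(self, l):
        # minimal renderer so to_string() works for this sketch
        l.append(u'<example-widget id="%s"></example-widget>' % self.get_id())
# e.g. x_example_widget(count='3').attr('count') == 3 and .attr('size') == 'small';
# passing size='huge' raises PyxlException because it is not in the enum.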
|
|
from __future__ import division
try:
from builtins import str
except ImportError:
pass
from flask.ext.login import login_required, current_user
from flask import Blueprint, current_app, render_template, request, redirect, \
url_for, flash, make_response
from flask_blogging.forms import BlogEditor
import math
from werkzeug.contrib.atom import AtomFeed
import datetime
from flask.ext.principal import PermissionDenied
def _get_blogging_engine(app):
return app.extensions["FLASK_BLOGGING_ENGINE"]
def _get_user_name(user):
user_name = user.get_name() if hasattr(user, "get_name") else str(user)
return user_name
def _clear_cache(cache):
cache.delete_memoized(index)
cache.delete_memoized(page_by_id)
cache.delete_memoized(posts_by_author)
cache.delete_memoized(posts_by_tag)
cache.delete_memoized(sitemap)
cache.delete_memoized(feed)
def _store_form_data(blog_form, storage, user, post):
title = blog_form.title.data
text = blog_form.text.data
tags = blog_form.tags.data.split(",")
draft = blog_form.draft.data
user_id = user.get_id()
current_datetime = datetime.datetime.utcnow()
post_date = post.get("post_date", current_datetime)
last_modified_date = datetime.datetime.utcnow()
post_id = post.get("post_id")
pid = storage.save_post(title, text, user_id, tags, draft=draft,
post_date=post_date,
last_modified_date=last_modified_date,
post_id=post_id)
return pid
def _get_meta(storage, count, page, tag=None, user_id=None):
max_posts = storage.count_posts(tag=tag, user_id=user_id)
max_pages = math.ceil(float(max_posts)/float(count))
max_offset = (max_pages-1)*count
offset = min(max(0, (page-1)*count), max_offset)
if (tag is None) and (user_id is None):
prev_page = None if page <= 1 else url_for(
"blogging.index", count=count, page=page-1)
next_page = None if page >= max_pages else url_for(
"blogging.index", count=count, page=page+1)
elif tag:
prev_page = None if page <= 1 else url_for(
"blogging.posts_by_tag", tag=tag, count=count, page=page-1)
next_page = None if page >= max_pages else url_for(
"blogging.posts_by_tag", tag=tag, count=count, page=page+1)
elif user_id:
prev_page = None if page <= 1 else url_for(
"blogging.posts_by_author", user_id=user_id, count=count,
page=page-1)
next_page = None if page >= max_pages else url_for(
"blogging.posts_by_author", user_id=user_id, count=count,
page=page+1)
else:
prev_page = next_page = None
pagination = dict(prev_page=prev_page, next_page=next_page)
meta = dict(max_posts=max_posts, max_pages=max_pages, page=page,
max_offset=max_offset, offset=offset, count=count,
pagination=pagination)
return meta
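# Worked example (hypothetical numbers): with 45 stored posts, count=10 and
# page=2, _get_meta computes max_pages=5, max_offset=40 and offset=10, and the
# pagination dict points prev_page at page 1 and next_page at page 3 of
# "blogging.index" (or the tag/author variants when those filters are set).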
def _is_blogger():
authenticated = current_user.is_authenticated() if callable(current_user.is_authenticated) else current_user.is_authenticated
is_blogger = authenticated and current_user.is_admin
return is_blogger
def index(count, page):
"""
Serves the page with a list of blog posts
:param count: number of posts to show per page
:param page: page number to render
:return: rendered template with the list of posts
"""
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
meta = _get_meta(storage, count, page)
offset = meta["offset"]
meta["is_user_blogger"] = _is_blogger()
render = config.get("BLOGGING_RENDER_TEXT", True)
posts = storage.get_posts(count=count, offset=offset, include_draft=False,
tag=None, user_id=None, recent=True)
for post in posts:
blogging_engine.process_post(post, render=render)
return render_template("blogging/index.html", posts=posts, meta=meta,
config=config)
def page_by_id(post_id, slug):
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
post = storage.get_post_by_id(post_id)
meta = {}
meta["is_user_blogger"] = _is_blogger()
render = config.get("BLOGGING_RENDER_TEXT", True)
if post is not None:
blogging_engine.process_post(post, render=render)
return render_template("blogging/page.html", post=post, config=config,
meta=meta)
else:
flash("The page you are trying to access is not valid!", "warning")
return redirect(url_for("blogging.index"))
def posts_by_tag(tag, count, page):
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
meta = _get_meta(storage, count, page, tag=tag)
offset = meta["offset"]
meta["is_user_blogger"] = _is_blogger()
render = config.get("BLOGGING_RENDER_TEXT", True)
posts = storage.get_posts(count=count, offset=offset, tag=tag,
include_draft=False, user_id=None, recent=True)
for post in posts:
blogging_engine.process_post(post, render=render)
return render_template("blogging/index.html", posts=posts, meta=meta,
config=config)
def posts_by_author(user_id, count, page):
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
meta = _get_meta(storage, count, page, user_id=user_id)
offset = meta["offset"]
meta["is_user_blogger"] = _is_blogger()
posts = storage.get_posts(count=count, offset=offset, user_id=user_id,
include_draft=False, tag=None, recent=True)
render = config.get("BLOGGING_RENDER_TEXT", True)
if len(posts):
for post in posts:
blogging_engine.process_post(post, render=render)
else:
flash("No posts found for this user!", "warning")
return render_template("blogging/index.html", posts=posts, meta=meta,
config=config)
@login_required
def editor(post_id):
blogging_engine = _get_blogging_engine(current_app)
cache = blogging_engine.cache
if cache:
_clear_cache(cache)
if _is_blogger():
post_processor = blogging_engine.post_processor
config = blogging_engine.config
storage = blogging_engine.storage
if request.method == 'POST':
form = BlogEditor(request.form)
if form.validate():
post = storage.get_post_by_id(post_id)
if (post is not None) and \
(current_user.get_id() == post["user_id"]) and \
(post["post_id"] == post_id):
pass
else:
post = {}
pid = _store_form_data(form, storage, current_user, post)
flash("Blog posted successfully!", "info")
slug = post_processor.create_slug(form.title.data)
return redirect(url_for("blogging.page_by_id", post_id=pid,
slug=slug))
else:
flash("There were errors in blog submission", "warning")
return render_template("blogging/editor.html", form=form,
post_id=post_id, config=config)
else:
if post_id is not None:
post = storage.get_post_by_id(post_id)
if (post is not None) and \
(current_user.get_id() == post["user_id"]):
tags = ", ".join(post["tags"])
form = BlogEditor(title=post["title"],
text=post["text"], tags=tags)
return render_template("blogging/editor.html",
form=form, post_id=post_id,
config=config)
else:
flash("You do not have the rights to edit this post",
"warning")
return redirect(url_for("blogging.index",
post_id=None))
form = BlogEditor()
return render_template("blogging/editor.html", form=form,
post_id=post_id, config=config)
@login_required
def delete(post_id):
blogging_engine = _get_blogging_engine(current_app)
cache = blogging_engine.cache
if cache:
_clear_cache(cache)
if _is_blogger():
storage = blogging_engine.storage
post = storage.get_post_by_id(post_id)
if (post is not None) and \
(current_user.get_id() == post["user_id"]):
success = storage.delete_post(post_id)
if success:
flash("Your post was successfully deleted", "info")
else:
flash("There were errors while deleting your post",
"warning")
else:
flash("You do not have the rights to delete this post",
"warning")
return redirect(url_for("blogging.index"))
def sitemap():
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
posts = storage.get_posts(count=None, offset=None, recent=True,
user_id=None, tag=None, include_draft=False)
for post in posts:
blogging_engine.process_post(post, render=False)
sitemap_xml = render_template("blogging/sitemap.xml", posts=posts,
config=config)
response = make_response(sitemap_xml)
response.headers["Content-Type"] = "application/xml"
return response
def feed():
blogging_engine = _get_blogging_engine(current_app)
storage = blogging_engine.storage
config = blogging_engine.config
count = config.get("BLOGGING_FEED_LIMIT")
posts = storage.get_posts(count=count, offset=None, recent=True,
user_id=None, tag=None, include_draft=False)
feed = AtomFeed(
'%s - All Articles' % config.get("BLOGGING_SITENAME",
"Flask-Blogging"),
feed_url=request.url, url=request.url_root, generator=None)
for post in posts:
blogging_engine.process_post(post, render=True)
feed.add(post["title"], str(post["rendered_text"]),
content_type='html',
author=post["user_name"],
url=config.get("BLOGGING_SITEURL", "")+post["url"],
updated=post["last_modified_date"],
published=post["post_date"])
response = feed.get_response()
response.headers["Content-Type"] = "application/xml"
return response
def unless(blogging_engine):
# disable caching for bloggers. They can change state!
def _unless():
return _is_blogger()
return _unless
def cached_func(blogging_engine, func):
cache = blogging_engine.cache
if cache is None:
return func
else:
unless_func = unless(blogging_engine)
config = blogging_engine.config
cache_timeout = config.get("BLOGGING_CACHE_TIMEOUT", 60) # 60 seconds
memoized_func = cache.memoize(
timeout=cache_timeout, unless=unless_func)(func)
return memoized_func
def create_blueprint(import_name, blogging_engine):
blog_app = Blueprint("blogging", import_name, template_folder='templates')
# register index
index_func = cached_func(blogging_engine, index)
blog_app.add_url_rule("/", defaults={"count": None, "page": 1},
view_func=index_func)
blog_app.add_url_rule("/<int:count>/", defaults={"page": 1},
view_func=index_func)
blog_app.add_url_rule("/<int:count>/<int:page>/", view_func=index_func)
# register page_by_id
page_by_id_func = cached_func(blogging_engine, page_by_id)
blog_app.add_url_rule("/page/<int:post_id>/", defaults={"slug": ""},
view_func=page_by_id_func)
blog_app.add_url_rule("/page/<int:post_id>/<slug>/",
view_func=page_by_id_func)
# register posts_by_tag
posts_by_tag_func = cached_func(blogging_engine, posts_by_tag)
blog_app.add_url_rule("/tag/<tag>/", defaults=dict(count=None, page=1),
view_func=posts_by_tag_func)
blog_app.add_url_rule("/tag/<tag>/<int:count>/", defaults=dict(page=1),
view_func=posts_by_tag_func)
blog_app.add_url_rule("/tag/<tag>/<int:count>/<int:page>/",
view_func=posts_by_tag_func)
# register posts_by_author
posts_by_author_func = cached_func(blogging_engine, posts_by_author)
blog_app.add_url_rule("/author/<user_id>/",
defaults=dict(count=None, page=1),
view_func=posts_by_author_func)
blog_app.add_url_rule("/author/<user_id>/<int:count>/",
defaults=dict(page=1),
view_func=posts_by_author_func)
blog_app.add_url_rule("/author/<user_id>/<int:count>/<int:page>/",
view_func=posts_by_author_func)
# register editor
editor_func = editor  # For now let's not cache this
blog_app.add_url_rule('/editor/', methods=["GET", "POST"],
defaults={"post_id": None},
view_func=editor_func)
blog_app.add_url_rule('/editor/<int:post_id>/', methods=["GET", "POST"],
view_func=editor_func)
# register delete
delete_func = delete  # For now let's not cache this
blog_app.add_url_rule("/delete/<int:post_id>/", methods=["POST"],
view_func=delete_func)
# register sitemap
sitemap_func = cached_func(blogging_engine, sitemap)
blog_app.add_url_rule("/sitemap.xml", view_func=sitemap_func)
# register feed
feed_func = cached_func(blogging_engine, feed)
blog_app.add_url_rule('/feeds/all.atom.xml', view_func=feed_func)
return blog_app
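# Usage sketch (normally performed by the BloggingEngine when it initializes
# the app; the "/blog" prefix here is hypothetical):
#
#   blog_app = create_blueprint(__name__, blogging_engine)
#   app.register_blueprint(blog_app, url_prefix="/blog")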
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the manila.db namespace. Call these
functions from manila.db namespace, not the manila.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/manila/manila.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import api as db_api
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for database.'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create.'),
cfg.StrOpt('share_name_template',
default='share-%s',
help='Template string to be used to generate share names.'),
cfg.StrOpt('share_snapshot_name_template',
default='share-snapshot-%s',
help='Template string to be used to generate share snapshot '
'names.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
_BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
lazy=True)
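# Example (illustrative): every helper below is a thin pass-through, so a call
# such as manila.db.api.service_get(context, service_id) is dispatched by IMPL
# to manila.db.sqlalchemy.api.service_get(context, service_id) with the default
# 'sqlalchemy' backend.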
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
return IMPL.authorize_project_context(context, project_id)
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
return IMPL.authorize_quota_class_context(context, class_name)
###################
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_share_sorted(context):
"""Get all share services sorted by share count.
:returns: a list of (Service, share_count) tuples.
"""
return IMPL.service_get_all_share_sorted(context)
def service_get_by_args(context, host, binary):
"""Get the state of an service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on an service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
####################
def quota_create(context, project_id, resource, limit, user_id=None):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit,
user_id=user_id)
def quota_get(context, project_id, resource, user_id=None):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource, user_id=user_id)
def quota_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all quotas associated with a given project and user."""
return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_get_all(context, project_id):
"""Retrieve all user quotas associated with a given project."""
return IMPL.quota_get_all(context, project_id)
def quota_update(context, project_id, resource, limit, user_id=None):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit,
user_id=user_id)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
###################
def quota_usage_get(context, project_id, resource, user_id=None):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id)
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project_and_user(context,
project_id, user_id)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_create(context, project_id, user_id, resource, in_use,
reserved=0, until_refresh=None):
"""Create a quota usage."""
return IMPL.quota_usage_create(context, project_id, user_id, resource,
in_use, reserved, until_refresh)
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, user_id, resource,
**kwargs)
###################
def quota_reserve(context, resources, quotas, user_quotas, deltas, expire,
until_refresh, max_age, project_id=None, user_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age,
project_id=project_id, user_id=user_id)
def reservation_commit(context, reservations, project_id=None, user_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def reservation_rollback(context, reservations, project_id=None, user_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
"""Destroy all quotas associated with a given project and user."""
return IMPL.quota_destroy_all_by_project_and_user(context,
project_id, user_id)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def share_instance_get(context, instance_id, with_share_data=False):
"""Get share instance by id."""
return IMPL.share_instance_get(context, instance_id,
with_share_data=with_share_data)
def share_instance_create(context, share_id, values):
"""Create new share instance."""
return IMPL.share_instance_create(context, share_id, values)
def share_instance_delete(context, instance_id):
"""Delete share instance."""
return IMPL.share_instance_delete(context, instance_id)
def share_instance_update(context, instance_id, values, with_share_data=False):
"""Update share instance fields."""
return IMPL.share_instance_update(context, instance_id, values,
with_share_data=with_share_data)
def share_instances_get_all(context):
"""Returns all share instances."""
return IMPL.share_instances_get_all(context)
def share_instances_get_all_by_share_server(context, share_server_id):
"""Returns all share instances with given share_server_id."""
return IMPL.share_instances_get_all_by_share_server(context,
share_server_id)
def share_instances_get_all_by_host(context, host, with_share_data=False):
"""Returns all share instances with given host."""
return IMPL.share_instances_get_all_by_host(
context, host, with_share_data=with_share_data)
def share_instances_get_all_by_share_network(context, share_network_id):
"""Returns list of shares that belong to given share network."""
return IMPL.share_instances_get_all_by_share_network(context,
share_network_id)
def share_instances_get_all_by_share(context, share_id):
"""Returns list of shares that belong to given share."""
return IMPL.share_instances_get_all_by_share(context, share_id)
def share_instances_get_all_by_share_group_id(context, share_group_id):
"""Returns list of share instances that belong to given share group."""
return IMPL.share_instances_get_all_by_share_group_id(
context, share_group_id)
###################
def share_create(context, share_values, create_share_instance=True):
"""Create new share."""
return IMPL.share_create(context, share_values,
create_share_instance=create_share_instance)
def share_update(context, share_id, values):
"""Update share fields."""
return IMPL.share_update(context, share_id, values)
def share_get(context, share_id):
"""Get share by id."""
return IMPL.share_get(context, share_id)
def share_get_all(context, filters=None, sort_key=None, sort_dir=None):
"""Get all shares."""
return IMPL.share_get_all(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
)
def share_get_all_by_project(context, project_id, filters=None,
is_public=False, sort_key=None, sort_dir=None):
"""Returns all shares with given project ID."""
return IMPL.share_get_all_by_project(
context, project_id, filters=filters, is_public=is_public,
sort_key=sort_key, sort_dir=sort_dir,
)
def share_get_all_by_share_group_id(context, share_group_id,
filters=None, sort_key=None,
sort_dir=None):
"""Returns all shares with given project ID and share group id."""
return IMPL.share_get_all_by_share_group_id(
context, share_group_id, filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
def share_get_all_by_share_server(context, share_server_id, filters=None,
sort_key=None, sort_dir=None):
"""Returns all shares with given share server ID."""
return IMPL.share_get_all_by_share_server(
context, share_server_id, filters=filters, sort_key=sort_key,
sort_dir=sort_dir,
)
def share_delete(context, share_id):
"""Delete share."""
return IMPL.share_delete(context, share_id)
###################
def share_access_create(context, values):
"""Allow access to share."""
return IMPL.share_access_create(context, values)
def share_access_get(context, access_id):
"""Get share access rule."""
return IMPL.share_access_get(context, access_id)
def share_access_get_all_for_share(context, share_id):
"""Get all access rules for given share."""
return IMPL.share_access_get_all_for_share(context, share_id)
def share_access_get_all_for_instance(context, instance_id, filters=None,
with_share_access_data=True):
"""Get all access rules related to a certain share instance."""
return IMPL.share_access_get_all_for_instance(
context, instance_id, filters=filters,
with_share_access_data=with_share_access_data)
def share_access_get_all_by_type_and_access(context, share_id, access_type,
access):
"""Returns share access by given type and access."""
return IMPL.share_access_get_all_by_type_and_access(
context, share_id, access_type, access)
def share_instance_access_create(context, values, share_instance_id):
"""Allow access to share instance."""
return IMPL.share_instance_access_create(
context, values, share_instance_id)
def share_instance_access_copy(context, share_id, instance_id):
"""Maps the existing access rules for the share to the instance in the DB.
Adds the instance mapping to the share's access rules and
returns the share's access rules.
"""
return IMPL.share_instance_access_copy(context, share_id, instance_id)
def share_instance_access_get(context, access_id, instance_id,
with_share_access_data=True):
"""Get access rule mapping for share instance."""
return IMPL.share_instance_access_get(
context, access_id, instance_id,
with_share_access_data=with_share_access_data)
def share_instance_access_update(context, access_id, instance_id, updates):
"""Update the access mapping row for a given share instance and access."""
return IMPL.share_instance_access_update(
context, access_id, instance_id, updates)
def share_instance_access_delete(context, mapping_id):
"""Deny access to share instance."""
return IMPL.share_instance_access_delete(context, mapping_id)
####################
def share_snapshot_instance_update(context, instance_id, values):
"""Set the given properties on a share snapshot instance and update it.
Raises NotFound if snapshot instance does not exist.
"""
return IMPL.share_snapshot_instance_update(context, instance_id, values)
def share_snapshot_instance_create(context, snapshot_id, values):
"""Create a share snapshot instance for an existing snapshot."""
return IMPL.share_snapshot_instance_create(
context, snapshot_id, values)
def share_snapshot_instance_get(context, instance_id, with_share_data=False):
"""Get a snapshot instance or raise a NotFound exception."""
return IMPL.share_snapshot_instance_get(
context, instance_id, with_share_data=with_share_data)
def share_snapshot_instance_get_all_with_filters(context, filters,
with_share_data=False):
"""Get all snapshot instances satisfying provided filters."""
return IMPL.share_snapshot_instance_get_all_with_filters(
context, filters, with_share_data=with_share_data)
def share_snapshot_instance_delete(context, snapshot_instance_id):
"""Delete a share snapshot instance."""
return IMPL.share_snapshot_instance_delete(context, snapshot_instance_id)
####################
def share_snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.share_snapshot_create(context, values)
def share_snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.share_snapshot_get(context, snapshot_id)
def share_snapshot_get_all(context, filters=None, sort_key=None,
sort_dir=None):
"""Get all snapshots."""
return IMPL.share_snapshot_get_all(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
)
def share_snapshot_get_all_by_project(context, project_id, filters=None,
sort_key=None, sort_dir=None):
"""Get all snapshots belonging to a project."""
return IMPL.share_snapshot_get_all_by_project(
context, project_id, filters=filters, sort_key=sort_key,
sort_dir=sort_dir,
)
def share_snapshot_get_all_for_share(context, share_id, filters=None,
sort_key=None, sort_dir=None):
"""Get all snapshots for a share."""
return IMPL.share_snapshot_get_all_for_share(
context, share_id, filters=filters, sort_key=sort_key,
sort_dir=sort_dir,
)
def share_snapshot_get_latest_for_share(context, share_id):
"""Get the most recent snapshot for a share."""
return IMPL.share_snapshot_get_latest_for_share(context, share_id)
def share_snapshot_update(context, snapshot_id, values):
"""Set the given properties on an snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.share_snapshot_update(context, snapshot_id, values)
###################
def share_snapshot_access_create(context, values):
"""Create a share snapshot access from the values dictionary."""
return IMPL.share_snapshot_access_create(context, values)
def share_snapshot_access_get(context, access_id):
"""Get share snapshot access rule from given access_id."""
return IMPL.share_snapshot_access_get(context, access_id)
def share_snapshot_access_get_all_for_snapshot_instance(
context, snapshot_instance_id, session=None):
"""Get all access rules related to a certain snapshot instance."""
return IMPL.share_snapshot_access_get_all_for_snapshot_instance(
context, snapshot_instance_id, session)
def share_snapshot_access_get_all_for_share_snapshot(context,
share_snapshot_id,
filters):
"""Get all access rules for a given share snapshot according to filters."""
return IMPL.share_snapshot_access_get_all_for_share_snapshot(
context, share_snapshot_id, filters)
def share_snapshot_export_locations_get(context, snapshot_id):
"""Get all export locations for a given share snapshot."""
return IMPL.share_snapshot_export_locations_get(context, snapshot_id)
def share_snapshot_instance_access_update(
context, access_id, instance_id, updates):
"""Update the state of the share snapshot instance access."""
return IMPL.share_snapshot_instance_access_update(
context, access_id, instance_id, updates)
def share_snapshot_instance_access_get(context, share_snapshot_instance_id,
access_id):
"""Get the share snapshot instance access related to given ids."""
return IMPL.share_snapshot_instance_access_get(
context, share_snapshot_instance_id, access_id)
def share_snapshot_instance_access_delete(context, access_id,
snapshot_instance_id):
"""Delete share snapshot instance access given its id."""
return IMPL.share_snapshot_instance_access_delete(
context, access_id, snapshot_instance_id)
def share_snapshot_instance_export_location_create(context, values):
"""Create a share snapshot instance export location."""
return IMPL.share_snapshot_instance_export_location_create(context, values)
def share_snapshot_instance_export_locations_get_all(
context, share_snapshot_instance_id):
"""Get the share snapshot instance export locations for given id."""
return IMPL.share_snapshot_instance_export_locations_get_all(
context, share_snapshot_instance_id)
def share_snapshot_instance_export_location_get(context, el_id):
"""Get the share snapshot instance export location for given id."""
return IMPL.share_snapshot_instance_export_location_get(
context, el_id)
def share_snapshot_instance_export_location_delete(context, el_id):
"""Delete share snapshot instance export location given its id."""
return IMPL.share_snapshot_instance_export_location_delete(context, el_id)
###################
def security_service_create(context, values):
"""Create security service DB record."""
return IMPL.security_service_create(context, values)
def security_service_delete(context, id):
"""Delete security service DB record."""
return IMPL.security_service_delete(context, id)
def security_service_update(context, id, values):
"""Update security service DB record."""
return IMPL.security_service_update(context, id, values)
def security_service_get(context, id):
"""Get security service DB record."""
return IMPL.security_service_get(context, id)
def security_service_get_all(context):
"""Get all security service DB records."""
return IMPL.security_service_get_all(context)
def security_service_get_all_by_project(context, project_id):
"""Get all security service DB records for the given project."""
return IMPL.security_service_get_all_by_project(context, project_id)
####################
def share_metadata_get(context, share_id):
"""Get all metadata for a share."""
return IMPL.share_metadata_get(context, share_id)
def share_metadata_delete(context, share_id, key):
"""Delete the given metadata item."""
IMPL.share_metadata_delete(context, share_id, key)
def share_metadata_update(context, share, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.share_metadata_update(context, share, metadata, delete)
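# Hedged example (illustrative only): a typical metadata round trip using the
# helpers above. The share argument is assumed to be the share model object
# that share_metadata_update() expects, and share_id its id.
def _example_share_metadata_roundtrip(context, share, share_id):
    share_metadata_update(context, share, {'purpose': 'scratch'}, delete=False)
    metadata = share_metadata_get(context, share_id)
    share_metadata_delete(context, share_id, 'purpose')
    return metadata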
###################
def share_export_location_get_by_uuid(context, export_location_uuid):
"""Get specific export location of a share."""
return IMPL.share_export_location_get_by_uuid(
context, export_location_uuid)
def share_export_locations_get(context, share_id):
"""Get all export locations of a share."""
return IMPL.share_export_locations_get(context, share_id)
def share_export_locations_get_by_share_id(context, share_id,
include_admin_only=True,
ignore_migration_destination=False):
"""Get all export locations of a share by its ID."""
return IMPL.share_export_locations_get_by_share_id(
context, share_id, include_admin_only=include_admin_only,
ignore_migration_destination=ignore_migration_destination)
def share_export_locations_get_by_share_instance_id(context,
share_instance_id):
"""Get all export locations of a share instance by its ID."""
return IMPL.share_export_locations_get_by_share_instance_id(
context, share_instance_id)
def share_export_locations_update(context, share_instance_id, export_locations,
delete=True):
"""Update export locations of a share instance."""
return IMPL.share_export_locations_update(
context, share_instance_id, export_locations, delete)
####################
def export_location_metadata_get(context, export_location_uuid, session=None):
"""Get all metadata of an export location."""
return IMPL.export_location_metadata_get(
context, export_location_uuid, session=session)
def export_location_metadata_delete(context, export_location_uuid, keys,
session=None):
"""Delete metadata of an export location."""
return IMPL.export_location_metadata_delete(
context, export_location_uuid, keys, session=session)
def export_location_metadata_update(context, export_location_uuid, metadata,
delete, session=None):
"""Update metadata of an export location."""
return IMPL.export_location_metadata_update(
context, export_location_uuid, metadata, delete, session=session)
####################
def share_network_create(context, values):
"""Create a share network DB record."""
return IMPL.share_network_create(context, values)
def share_network_delete(context, id):
"""Delete a share network DB record."""
return IMPL.share_network_delete(context, id)
def share_network_update(context, id, values):
"""Update a share network DB record."""
return IMPL.share_network_update(context, id, values)
def share_network_get(context, id):
"""Get requested share network DB record."""
return IMPL.share_network_get(context, id)
def share_network_get_all(context):
"""Get all share network DB records."""
return IMPL.share_network_get_all(context)
def share_network_get_all_by_project(context, project_id):
"""Get all share network DB records for the given project."""
return IMPL.share_network_get_all_by_project(context, project_id)
def share_network_get_all_by_security_service(context, security_service_id):
"""Get all share network DB records for the given project."""
return IMPL.share_network_get_all_by_security_service(
context, security_service_id)
def share_network_add_security_service(context, id, security_service_id):
    """Associate a security service with a share network."""
    return IMPL.share_network_add_security_service(
        context, id, security_service_id)
def share_network_remove_security_service(context, id, security_service_id):
    """Dissociate a security service from a share network."""
    return IMPL.share_network_remove_security_service(
        context, id, security_service_id)
##################
def network_allocation_create(context, values):
"""Create a network allocation DB record."""
return IMPL.network_allocation_create(context, values)
def network_allocation_delete(context, id):
"""Delete a network allocation DB record."""
return IMPL.network_allocation_delete(context, id)
def network_allocation_update(context, id, values):
"""Update a network allocation DB record."""
return IMPL.network_allocation_update(context, id, values)
def network_allocations_get_for_share_server(context, share_server_id,
session=None, label=None):
"""Get network allocations for share server."""
return IMPL.network_allocations_get_for_share_server(
context, share_server_id, label=label, session=session)
def network_allocations_get_by_ip_address(context, ip_address):
"""Get network allocations by IP address."""
return IMPL.network_allocations_get_by_ip_address(context, ip_address)
##################
def share_server_create(context, values):
"""Create share server DB record."""
return IMPL.share_server_create(context, values)
def share_server_delete(context, id):
"""Delete share server DB record."""
return IMPL.share_server_delete(context, id)
def share_server_update(context, id, values):
"""Update share server DB record."""
return IMPL.share_server_update(context, id, values)
def share_server_get(context, id, session=None):
"""Get share server DB record by ID."""
return IMPL.share_server_get(context, id, session=session)
def share_server_get_all_by_host_and_share_net_valid(context, host,
share_net_id,
session=None):
"""Get share server DB records by host and share net not error."""
return IMPL.share_server_get_all_by_host_and_share_net_valid(
context, host, share_net_id, session=session)
def share_server_get_all(context):
"""Get all share server DB records."""
return IMPL.share_server_get_all(context)
def share_server_get_all_by_host(context, host):
"""Get all share servers related to particular host."""
return IMPL.share_server_get_all_by_host(context, host)
def share_server_get_all_unused_deletable(context, host, updated_before):
"""Get all free share servers DB records."""
return IMPL.share_server_get_all_unused_deletable(context, host,
updated_before)
def share_server_backend_details_set(context, share_server_id, server_details):
"""Create DB record with backend details."""
return IMPL.share_server_backend_details_set(context, share_server_id,
server_details)
##################
def share_type_create(context, values, projects=None):
"""Create a new share type."""
return IMPL.share_type_create(context, values, projects)
def share_type_get_all(context, inactive=False, filters=None):
"""Get all share types.
:param context: context to query under
    :param inactive: Include inactive share types in the result set
:param filters: Filters for the query in the form of key/value.
:is_public: Filter share types based on visibility:
* **True**: List public share types only
* **False**: List private share types only
* **None**: List both public and private share types
:returns: list of matching share types
"""
return IMPL.share_type_get_all(context, inactive, filters)
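# Hedged example (illustrative only): using the is_public filter described in
# the docstring above. The context is assumed to be a request context object.
def _example_list_public_share_types(context):
    # True lists public share types only; False would list private ones,
    # and None would list both.
    return share_type_get_all(context, inactive=False,
                              filters={'is_public': True})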
def share_type_get(context, type_id, inactive=False, expected_fields=None):
"""Get share type by id.
:param context: context to query under
:param type_id: share type id to get.
:param inactive: Consider inactive share types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: share type
"""
return IMPL.share_type_get(context, type_id, inactive, expected_fields)
def share_type_get_by_name(context, name):
"""Get share type by name."""
return IMPL.share_type_get_by_name(context, name)
def share_type_get_by_name_or_id(context, name_or_id):
"""Get share type by name or ID and return None if not found."""
return IMPL.share_type_get_by_name_or_id(context, name_or_id)
def share_type_access_get_all(context, type_id):
"""Get all share type access of a share type."""
return IMPL.share_type_access_get_all(context, type_id)
def share_type_access_add(context, type_id, project_id):
"""Add share type access for project."""
return IMPL.share_type_access_add(context, type_id, project_id)
def share_type_access_remove(context, type_id, project_id):
"""Remove share type access for project."""
return IMPL.share_type_access_remove(context, type_id, project_id)
def share_type_destroy(context, id):
"""Delete a share type."""
return IMPL.share_type_destroy(context, id)
####################
def share_type_extra_specs_get(context, share_type_id):
"""Get all extra specs for a share type."""
return IMPL.share_type_extra_specs_get(context, share_type_id)
def share_type_extra_specs_delete(context, share_type_id, key):
"""Delete the given extra specs item."""
return IMPL.share_type_extra_specs_delete(context, share_type_id, key)
def share_type_extra_specs_update_or_create(context, share_type_id,
extra_specs):
"""Create or update share type extra specs.
This adds or modifies the key/value pairs specified in the extra
specs dict argument.
"""
return IMPL.share_type_extra_specs_update_or_create(context,
share_type_id,
extra_specs)
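# Hedged example (illustrative only): adding or updating a couple of extra
# specs on a share type via the helper above. The spec values are strings, as
# extra specs are stored as key/value text pairs.
def _example_set_share_type_extra_specs(context, share_type_id):
    return share_type_extra_specs_update_or_create(
        context, share_type_id,
        {'driver_handles_share_servers': 'False',
         'snapshot_support': 'True'})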
def driver_private_data_get(context, entity_id, key=None, default=None):
"""Get one, list or all key-value pairs for given entity_id."""
return IMPL.driver_private_data_get(context, entity_id, key, default)
def driver_private_data_update(context, entity_id, details,
delete_existing=False):
"""Update key-value pairs for given entity_id."""
return IMPL.driver_private_data_update(context, entity_id, details,
delete_existing)
def driver_private_data_delete(context, entity_id, key=None):
"""Remove one, list or all key-value pairs for given entity_id."""
return IMPL.driver_private_data_delete(context, entity_id, key)
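# Hedged example (illustrative only): a driver storing, reading back and
# removing its own private key/value data for an entity such as a share
# instance id. The 'provider_location' key is just a placeholder.
def _example_driver_private_data_roundtrip(context, entity_id):
    driver_private_data_update(
        context, entity_id, {'provider_location': '/exports/example'})
    location = driver_private_data_get(
        context, entity_id, key='provider_location')
    driver_private_data_delete(context, entity_id, key='provider_location')
    return location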
####################
def availability_zone_get(context, id_or_name):
"""Get availability zone by name or id."""
return IMPL.availability_zone_get(context, id_or_name)
def availability_zone_get_all(context):
"""Get all active availability zones."""
return IMPL.availability_zone_get_all(context)
####################
def share_group_get(context, share_group_id):
"""Get a share group or raise if it does not exist."""
return IMPL.share_group_get(context, share_group_id)
def share_group_get_all(context, detailed=True, filters=None, sort_key=None,
sort_dir=None):
"""Get all share groups."""
return IMPL.share_group_get_all(
context, detailed=detailed, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
def share_group_get_all_by_host(context, host, detailed=True, filters=None,
sort_key=None, sort_dir=None):
"""Get all share groups belonging to a host."""
return IMPL.share_group_get_all_by_host(
context, host, detailed=detailed, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
def share_group_create(context, values):
"""Create a share group from the values dictionary."""
return IMPL.share_group_create(context, values)
def share_group_get_all_by_share_server(context, share_server_id,
filters=None, sort_key=None,
sort_dir=None):
"""Get all share groups associated with a share server."""
return IMPL.share_group_get_all_by_share_server(
context, share_server_id, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
def share_group_get_all_by_project(context, project_id, detailed=True,
filters=None, sort_key=None,
sort_dir=None):
"""Get all share groups belonging to a project."""
return IMPL.share_group_get_all_by_project(
context, project_id, detailed=detailed, filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
def share_group_update(context, share_group_id, values):
"""Set the given properties on a share group and update it.
Raises NotFound if share group does not exist.
"""
return IMPL.share_group_update(context, share_group_id, values)
def share_group_destroy(context, share_group_id):
"""Destroy the share group or raise if it does not exist."""
return IMPL.share_group_destroy(context, share_group_id)
def count_shares_in_share_group(context, share_group_id):
"""Returns the number of undeleted shares with the specified group."""
return IMPL.count_shares_in_share_group(context, share_group_id)
def get_all_shares_by_share_group(context, share_group_id):
    """Returns all shares that belong to the given share group."""
    return IMPL.get_all_shares_by_share_group(context, share_group_id)
def count_share_group_snapshots_in_share_group(context, share_group_id):
"""Returns the number of sg snapshots with the specified share group."""
return IMPL.count_share_group_snapshots_in_share_group(
context, share_group_id)
def count_share_groups_in_share_network(context, share_network_id,
session=None):
"""Return the number of groups with the specified share network."""
return IMPL.count_share_groups_in_share_network(context, share_network_id)
def count_share_group_snapshot_members_in_share(context, share_id,
session=None):
"""Returns the number of group snapshot members linked to the share."""
return IMPL.count_share_group_snapshot_members_in_share(context, share_id)
def share_group_snapshot_get(context, share_group_snapshot_id):
"""Get a share group snapshot."""
return IMPL.share_group_snapshot_get(context, share_group_snapshot_id)
def share_group_snapshot_get_all(context, detailed=True, filters=None,
sort_key=None, sort_dir=None):
"""Get all share group snapshots."""
return IMPL.share_group_snapshot_get_all(
context, detailed=detailed, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
def share_group_snapshot_get_all_by_project(context, project_id, detailed=True,
filters=None, sort_key=None,
sort_dir=None):
"""Get all share group snapshots belonging to a project."""
return IMPL.share_group_snapshot_get_all_by_project(
context, project_id, detailed=detailed, filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
def share_group_snapshot_create(context, values):
"""Create a share group snapshot from the values dictionary."""
return IMPL.share_group_snapshot_create(context, values)
def share_group_snapshot_update(context, share_group_snapshot_id, values):
"""Set the given properties on a share group snapshot and update it.
Raises NotFound if share group snapshot does not exist.
"""
return IMPL.share_group_snapshot_update(
context, share_group_snapshot_id, values)
def share_group_snapshot_destroy(context, share_group_snapshot_id):
"""Destroy the share_group_snapshot or raise if it does not exist."""
return IMPL.share_group_snapshot_destroy(context, share_group_snapshot_id)
def share_group_snapshot_members_get_all(context, share_group_snapshot_id):
"""Return the members of a share group snapshot."""
return IMPL.share_group_snapshot_members_get_all(
context, share_group_snapshot_id)
def share_group_snapshot_member_create(context, values):
"""Create a share group snapshot member from the values dictionary."""
return IMPL.share_group_snapshot_member_create(context, values)
def share_group_snapshot_member_update(context, member_id, values):
"""Set the given properties on a share group snapshot member and update it.
Raises NotFound if share_group_snapshot member does not exist.
"""
return IMPL.share_group_snapshot_member_update(context, member_id, values)
####################
def share_replicas_get_all(context, with_share_server=False,
with_share_data=False):
"""Returns all share replicas regardless of share."""
return IMPL.share_replicas_get_all(
context, with_share_server=with_share_server,
with_share_data=with_share_data)
def share_replicas_get_all_by_share(context, share_id, with_share_server=False,
with_share_data=False):
"""Returns all share replicas for a given share."""
return IMPL.share_replicas_get_all_by_share(
context, share_id, with_share_server=with_share_server,
with_share_data=with_share_data)
def share_replicas_get_available_active_replica(context, share_id,
with_share_server=False,
with_share_data=False):
"""Returns an active replica for a given share."""
return IMPL.share_replicas_get_available_active_replica(
context, share_id, with_share_server=with_share_server,
with_share_data=with_share_data)
def share_replica_get(context, replica_id, with_share_server=False,
with_share_data=False):
"""Get share replica by id."""
return IMPL.share_replica_get(
context, replica_id, with_share_server=with_share_server,
with_share_data=with_share_data)
def share_replica_update(context, share_replica_id, values,
with_share_data=False):
"""Updates a share replica with given values."""
return IMPL.share_replica_update(context, share_replica_id, values,
with_share_data=with_share_data)
def share_replica_delete(context, share_replica_id):
"""Deletes a share replica."""
return IMPL.share_replica_delete(context, share_replica_id)
def purge_deleted_records(context, age_in_days):
"""Purge deleted rows older than given age from all tables
:raises: InvalidParameterValue if age_in_days is incorrect.
"""
return IMPL.purge_deleted_records(context, age_in_days=age_in_days)
####################
def share_group_type_create(context, values, projects=None):
"""Create a new share group type."""
return IMPL.share_group_type_create(context, values, projects)
def share_group_type_get_all(context, inactive=False, filters=None):
"""Get all share group types.
:param context: context to query under
    :param inactive: Include inactive share group types in the result set
:param filters: Filters for the query in the form of key/value.
:is_public: Filter share group types based on visibility:
* **True**: List public group types only
* **False**: List private group types only
* **None**: List both public and private group types
:returns: list of matching share group types
"""
return IMPL.share_group_type_get_all(context, inactive, filters)
def share_group_type_get(context, type_id, inactive=False,
expected_fields=None):
"""Get share_group type by id.
:param context: context to query under
:param type_id: group type id to get.
:param inactive: Consider inactive group types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: share group type
"""
return IMPL.share_group_type_get(
context, type_id, inactive, expected_fields)
def share_group_type_get_by_name(context, name):
"""Get share group type by name."""
return IMPL.share_group_type_get_by_name(context, name)
def share_group_type_access_get_all(context, type_id):
"""Get all share group type access of a share group type."""
return IMPL.share_group_type_access_get_all(context, type_id)
def share_group_type_access_add(context, type_id, project_id):
"""Add share group type access for project."""
return IMPL.share_group_type_access_add(context, type_id, project_id)
def share_group_type_access_remove(context, type_id, project_id):
"""Remove share group type access for project."""
return IMPL.share_group_type_access_remove(context, type_id, project_id)
def share_group_type_destroy(context, type_id):
"""Delete a share group type."""
return IMPL.share_group_type_destroy(context, type_id)
def share_group_type_specs_get(context, type_id):
"""Get all group specs for a share group type."""
return IMPL.share_group_type_specs_get(context, type_id)
def share_group_type_specs_delete(context, type_id, key):
"""Delete the given group specs item."""
return IMPL.share_group_type_specs_delete(context, type_id, key)
def share_group_type_specs_update_or_create(context, type_id, group_specs):
"""Create or update share group type specs.
This adds or modifies the key/value pairs specified in the group
specs dict argument.
"""
return IMPL.share_group_type_specs_update_or_create(
context, type_id, group_specs)
"""
sentry.tagstore.legacy.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from collections import defaultdict
from datetime import timedelta
from django.db import connections, router, IntegrityError, transaction
from django.db.models import Q, Sum
from django.utils import timezone
from operator import or_
from six.moves import reduce
from sentry import buffer
from sentry.tagstore import TagKeyStatus
from sentry.tagstore.base import TagStorage
from sentry.utils import db
from .models import EventTag, GroupTagKey, GroupTagValue, TagKey, TagValue
class LegacyTagStorage(TagStorage):
"""\
The legacy tagstore backend ignores the ``environment_id`` (because it doesn't store this information
in its models) and stores ``times_seen`` and ``values_seen`` in Postgres.
"""
def setup(self):
self.setup_deletions(
tagvalue_model=TagValue,
grouptagkey_model=GroupTagKey,
grouptagvalue_model=GroupTagValue,
eventtag_model=EventTag,
)
self.setup_cleanup(
tagvalue_model=TagValue,
grouptagvalue_model=GroupTagValue,
eventtag_model=EventTag,
)
self.setup_merge(
grouptagkey_model=GroupTagKey,
grouptagvalue_model=GroupTagValue,
)
self.setup_receivers(
tagvalue_model=TagValue,
grouptagvalue_model=GroupTagValue,
)
def setup_deletions(self, **kwargs):
super(LegacyTagStorage, self).setup_deletions(**kwargs)
from sentry.deletions import default_manager as deletion_manager
from sentry.deletions.base import ModelRelation, ModelDeletionTask
from sentry.models import Project
class TagKeyDeletionTask(ModelDeletionTask):
def get_child_relations(self, instance):
# in bulk
model_list = (GroupTagValue, GroupTagKey, TagValue)
relations = [
ModelRelation(m, {
'project_id': instance.project_id,
'key': instance.key,
}) for m in model_list
]
return relations
def mark_deletion_in_progress(self, instance_list):
for instance in instance_list:
if instance.status != TagKeyStatus.DELETION_IN_PROGRESS:
instance.update(status=TagKeyStatus.DELETION_IN_PROGRESS)
deletion_manager.register(TagKey, TagKeyDeletionTask)
deletion_manager.add_dependencies(Project, [
lambda instance: ModelRelation(TagKey, {'project_id': instance.id}),
lambda instance: ModelRelation(TagValue, {'project_id': instance.id}),
lambda instance: ModelRelation(GroupTagKey, {'project_id': instance.id}),
lambda instance: ModelRelation(GroupTagValue, {'project_id': instance.id}),
])
def setup_receivers(self, **kwargs):
super(LegacyTagStorage, self).setup_receivers(**kwargs)
from sentry.signals import buffer_incr_complete
# Legacy tag write flow:
#
# event_manager calls index_event_tags:
# for tag in event:
# get_or_create_tag_key
# get_or_create_tag_value
# create_event_tags
#
# event_manager calls Group.objects.add_tags:
# for tag in event:
# incr_tag_value_times_seen:
# (async) buffer.incr(TagValue):
# create_or_update(TagValue)
# buffer_incr_complete.send_robust(TagValue):
# record_project_tag_count(TagValue)
# if created(TagValue):
# incr_tag_key_values_seen:
# (async) buffer.incr(TagKey):
# create_or_update(TagKey)
# incr_group_tag_value_times_seen:
# (async) buffer.incr(GroupTagValue):
# create_or_update(GroupTagValue)
# buffer_incr_complete.send_robust(GroupTagValue)
# record_project_tag_count(GroupTagValue)
# if created(GroupTagValue):
# incr_group_tag_key_values_seen:
# (async) buffer.incr(GroupTagKey):
# create_or_update(GroupTagKey)
@buffer_incr_complete.connect(sender=TagValue, weak=False)
def record_project_tag_count(filters, created, **kwargs):
if not created:
return
project_id = filters['project_id']
key = filters['key']
buffer.incr(TagKey,
columns={
'values_seen': 1,
},
filters={
'project_id': project_id,
'key': key,
})
@buffer_incr_complete.connect(sender=GroupTagValue, weak=False)
def record_group_tag_count(filters, created, extra, **kwargs):
if not created:
return
project_id = extra['project_id']
group_id = filters['group_id']
key = filters['key']
buffer.incr(GroupTagKey,
columns={
'values_seen': 1,
},
filters={
'project_id': project_id,
'group_id': group_id,
'key': key,
})
def create_tag_key(self, project_id, environment_id, key, **kwargs):
return TagKey.objects.create(project_id=project_id, key=key, **kwargs)
def get_or_create_tag_key(self, project_id, environment_id, key, **kwargs):
return TagKey.objects.get_or_create(project_id=project_id, key=key, **kwargs)
def create_tag_value(self, project_id, environment_id, key, value, **kwargs):
return TagValue.objects.create(project_id=project_id, key=key, value=value, **kwargs)
def get_or_create_tag_value(self, project_id, environment_id,
key, value, key_id=None, **kwargs):
return TagValue.objects.get_or_create(
project_id=project_id, key=key, value=value, **kwargs)
def create_group_tag_key(self, project_id, group_id, environment_id, key, **kwargs):
return GroupTagKey.objects.create(project_id=project_id, group_id=group_id,
key=key, **kwargs)
def get_or_create_group_tag_key(self, project_id, group_id, environment_id, key, **kwargs):
return GroupTagKey.objects.get_or_create(project_id=project_id, group_id=group_id,
key=key, **kwargs)
def create_group_tag_value(self, project_id, group_id, environment_id, key, value, **kwargs):
return GroupTagValue.objects.create(
project_id=project_id, group_id=group_id, key=key, value=value, **kwargs)
def get_or_create_group_tag_value(self, project_id, group_id,
environment_id, key, value, **kwargs):
return GroupTagValue.objects.get_or_create(
project_id=project_id, group_id=group_id, key=key, value=value, **kwargs)
def create_event_tags(self, project_id, group_id, environment_id, event_id, tags):
tag_ids = []
for key, value in tags:
tagkey, _ = self.get_or_create_tag_key(project_id, environment_id, key)
tagvalue, _ = self.get_or_create_tag_value(
project_id, environment_id, key, value)
tag_ids.append((tagkey.id, tagvalue.id))
try:
# don't let a duplicate break the outer transaction
with transaction.atomic():
# Tags are bulk inserted because this is an all-or-nothing situation.
# Either the whole transaction works, or it doesn't. There's no value
# in a partial success where we'd need to replay half of the rows.
EventTag.objects.bulk_create([
EventTag(
project_id=project_id,
group_id=group_id,
event_id=event_id,
key_id=key_id,
value_id=value_id,
)
for key_id, value_id in tag_ids
])
except IntegrityError:
pass
def get_tag_key(self, project_id, environment_id, key, status=TagKeyStatus.VISIBLE):
from sentry.tagstore.exceptions import TagKeyNotFound
qs = TagKey.objects.filter(
project_id=project_id,
key=key,
)
if status is not None:
qs = qs.filter(status=status)
try:
return qs.get()
except TagKey.DoesNotExist:
raise TagKeyNotFound
def get_tag_keys(self, project_id, environment_id, status=TagKeyStatus.VISIBLE):
qs = TagKey.objects.filter(project_id=project_id)
if status is not None:
qs = qs.filter(status=status)
return list(qs)
def get_tag_value(self, project_id, environment_id, key, value):
from sentry.tagstore.exceptions import TagValueNotFound
try:
return TagValue.objects.get(
project_id=project_id,
key=key,
value=value
)
except TagValue.DoesNotExist:
raise TagValueNotFound
def get_tag_values(self, project_id, environment_id, key):
qs = TagValue.objects.filter(
project_id=project_id,
key=key,
)
return list(qs)
def get_group_tag_key(self, project_id, group_id, environment_id, key):
from sentry.tagstore.exceptions import GroupTagKeyNotFound
try:
return GroupTagKey.objects.get(
group_id=group_id,
key=key,
)
except GroupTagKey.DoesNotExist:
raise GroupTagKeyNotFound
def get_group_tag_keys(self, project_id, group_id, environment_id, limit=None):
qs = GroupTagKey.objects.filter(group_id=group_id)
if limit is not None:
qs = qs[:limit]
return list(qs)
def get_group_tag_value(self, project_id, group_id, environment_id, key, value):
from sentry.tagstore.exceptions import GroupTagValueNotFound
try:
return GroupTagValue.objects.get(
group_id=group_id,
key=key,
value=value,
)
except GroupTagValue.DoesNotExist:
raise GroupTagValueNotFound
def get_group_tag_values(self, project_id, group_id, environment_id, key):
qs = GroupTagValue.objects.filter(
group_id=group_id,
key=key,
)
return list(qs)
def delete_tag_key(self, project_id, key):
from sentry.tagstore.tasks import delete_tag_key as delete_tag_key_task
tagkeys_qs = TagKey.objects.filter(
project_id=project_id,
key=key,
)
deleted = []
for tagkey in tagkeys_qs:
updated = TagKey.objects.filter(
id=tagkey.id,
status=TagKeyStatus.VISIBLE,
).update(status=TagKeyStatus.PENDING_DELETION)
if updated:
delete_tag_key_task.delay(object_id=tagkey.id, model=TagKey)
deleted.append(tagkey)
return deleted
def delete_all_group_tag_keys(self, project_id, group_id):
GroupTagKey.objects.filter(
group_id=group_id,
).delete()
def delete_all_group_tag_values(self, project_id, group_id):
GroupTagValue.objects.filter(
group_id=group_id,
).delete()
def incr_tag_value_times_seen(self, project_id, environment_id,
key, value, extra=None, count=1):
buffer.incr(TagValue,
columns={
'times_seen': count,
},
filters={
'project_id': project_id,
'key': key,
'value': value,
},
extra=extra)
def incr_group_tag_value_times_seen(self, project_id, group_id, environment_id,
key, value, extra=None, count=1):
buffer.incr(GroupTagValue,
columns={
'times_seen': count,
},
filters={
'group_id': group_id,
'key': key,
'value': value,
},
extra=extra)
def get_group_event_ids(self, project_id, group_id, environment_id, tags):
tagkeys = dict(
TagKey.objects.filter(
project_id=project_id,
key__in=tags.keys(),
status=TagKeyStatus.VISIBLE,
).values_list('key', 'id')
)
tagvalues = {
(t[1], t[2]): t[0]
for t in TagValue.objects.filter(
reduce(or_, (Q(key=k, value=v)
for k, v in six.iteritems(tags))),
project_id=project_id,
).values_list('id', 'key', 'value')
}
try:
tag_lookups = [(tagkeys[k], tagvalues[(k, v)])
for k, v in six.iteritems(tags)]
# [(1, 10), ...]
except KeyError:
# one or more tags were invalid, thus the result should be an empty
# set
return []
        # Django doesn't support union, so we limit results and try to find
# reasonable matches
# get initial matches to start the filter
k, v = tag_lookups.pop()
matches = list(
EventTag.objects.filter(
key_id=k,
value_id=v,
group_id=group_id,
).values_list('event_id', flat=True)[:1000]
)
# for each remaining tag, find matches contained in our
# existing set, pruning it down each iteration
for k, v in tag_lookups:
matches = list(
EventTag.objects.filter(
key_id=k,
value_id=v,
event_id__in=matches,
group_id=group_id,
).values_list('event_id', flat=True)[:1000]
)
if not matches:
return []
return matches
def get_groups_user_counts(self, project_id, group_ids, environment_id):
qs = GroupTagKey.objects.filter(
project_id=project_id,
group_id__in=group_ids,
key='sentry:user'
)
return defaultdict(int, qs.values_list('group_id', 'values_seen'))
def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
if db.is_postgres():
            # This doesn't guarantee percentage is accurate, but it does ensure
# that the query has a maximum cost
using = router.db_for_read(GroupTagValue)
cursor = connections[using].cursor()
cursor.execute(
"""
SELECT SUM(t)
FROM (
SELECT times_seen as t
FROM sentry_messagefiltervalue
WHERE group_id = %s
AND key = %s
ORDER BY last_seen DESC
LIMIT 10000
) as a
""", [group_id, key]
)
return cursor.fetchone()[0] or 0
cutoff = timezone.now() - timedelta(days=7)
return GroupTagValue.objects.filter(
group_id=group_id,
key=key,
last_seen__gte=cutoff,
).aggregate(t=Sum('times_seen'))['t']
def get_top_group_tag_values(self, project_id, group_id, environment_id, key, limit=3):
if db.is_postgres():
            # This doesn't guarantee percentage is accurate, but it does ensure
# that the query has a maximum cost
return list(
GroupTagValue.objects.raw(
"""
SELECT *
FROM (
SELECT *
FROM sentry_messagefiltervalue
WHERE group_id = %%s
AND key = %%s
ORDER BY last_seen DESC
LIMIT 10000
) as a
ORDER BY times_seen DESC
LIMIT %d
""" % limit, [group_id, key]
)
)
cutoff = timezone.now() - timedelta(days=7)
return list(
GroupTagValue.objects.filter(
group_id=group_id,
key=key,
last_seen__gte=cutoff,
).order_by('-times_seen')[:limit]
)
def get_first_release(self, project_id, group_id):
try:
first_release = GroupTagValue.objects.filter(
project_id=project_id,
group_id=group_id,
key__in=('sentry:release', 'release'),
).order_by('first_seen')[0]
except IndexError:
return None
else:
return first_release.value
def get_last_release(self, project_id, group_id):
try:
last_release = GroupTagValue.objects.filter(
project_id=project_id,
group_id=group_id,
key__in=('sentry:release', 'release'),
).order_by('-last_seen')[0]
except IndexError:
return None
return last_release.value
def get_release_tags(self, project_ids, environment_id, versions):
return list(TagValue.objects.filter(
project_id__in=project_ids,
key='sentry:release',
value__in=versions,
))
def get_group_ids_for_users(self, project_ids, event_users, limit=100):
return list(GroupTagValue.objects.filter(
key='sentry:user',
value__in=[eu.tag_value for eu in event_users],
project_id__in=project_ids,
).order_by('-last_seen').values_list('group_id', flat=True)[:limit])
def get_group_tag_values_for_users(self, event_users, limit=100):
tag_filters = [Q(value=eu.tag_value, project_id=eu.project_id) for eu in event_users]
return list(GroupTagValue.objects.filter(
reduce(or_, tag_filters),
key='sentry:user',
).order_by('-last_seen')[:limit])
def get_group_ids_for_search_filter(self, project_id, environment_id, tags):
from sentry.search.base import ANY, EMPTY
        # Django doesn't support union, so we limit results and try to find
# reasonable matches
# ANY matches should come last since they're the least specific and
# will provide the largest range of matches
tag_lookups = sorted(six.iteritems(tags), key=lambda x: x != ANY)
# get initial matches to start the filter
matches = None
# for each remaining tag, find matches contained in our
# existing set, pruning it down each iteration
for k, v in tag_lookups:
if v is EMPTY:
return None
elif v != ANY:
base_qs = GroupTagValue.objects.filter(
key=k,
value=v,
project_id=project_id,
)
else:
base_qs = GroupTagValue.objects.filter(
key=k,
project_id=project_id,
).distinct()
if matches:
base_qs = base_qs.filter(group_id__in=matches)
else:
# restrict matches to only the most recently seen issues
base_qs = base_qs.order_by('-last_seen')
matches = list(base_qs.values_list('group_id', flat=True)[:1000])
if not matches:
return None
return matches
def update_group_tag_key_values_seen(self, project_id, group_ids):
gtk_qs = GroupTagKey.objects.filter(
project_id=project_id,
group_id__in=group_ids
)
for instance in gtk_qs:
instance.update(
values_seen=GroupTagValue.objects.filter(
project_id=instance.project_id,
group_id=instance.group_id,
key=instance.key,
).count(),
)
def get_tag_value_qs(self, project_id, environment_id, key, query=None):
queryset = TagValue.objects.filter(
project_id=project_id,
key=key,
)
if query:
queryset = queryset.filter(value__contains=query)
return queryset
def get_group_tag_value_qs(self, project_id, group_id, environment_id, key):
return GroupTagValue.objects.filter(
group_id=group_id,
key=key,
)
def update_group_for_events(self, project_id, event_ids, destination_id):
return EventTag.objects.filter(
project_id=project_id,
event_id__in=event_ids,
).update(group_id=destination_id)
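# Hedged example (not part of the upstream module): a minimal sketch of how a
# caller could exercise this backend directly, mirroring the legacy tag write
# flow described in setup_receivers(). Real Sentry code goes through
# sentry.tagstore rather than instantiating the backend itself; the ids and
# the 'browser' tag used here are placeholders.
def _example_legacy_tagstore_usage(project_id, group_id, event_id):
    tagstore = LegacyTagStorage()
    tagstore.setup()
    tagstore.get_or_create_tag_key(project_id, None, 'browser')
    tagstore.get_or_create_tag_value(project_id, None, 'browser', 'Firefox')
    tagstore.create_event_tags(project_id, group_id, None, event_id,
                               [('browser', 'Firefox')])
    return tagstore.get_group_tag_value_count(
        project_id, group_id, None, 'browser')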
'''
brozzler/ydl.py - youtube-dl / yt-dlp support for brozzler
Copyright (C) 2022 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import yt_dlp as youtube_dl
import brozzler
import urllib.request
import tempfile
import urlcanon
import os
import json
import doublethink
import datetime
import threading
thread_local = threading.local()
_orig_webpage_read_content = youtube_dl.extractor.GenericIE._webpage_read_content
def _webpage_read_content(self, *args, **kwargs):
content = _orig_webpage_read_content(self, *args, **kwargs)
if len(content) > 20000000:
logging.warning(
'bypassing yt-dlp extraction because content is '
'too large (%s characters)', len(content))
return ''
return content
youtube_dl.extractor.GenericIE._webpage_read_content = _webpage_read_content
class ExtraHeaderAdder(urllib.request.BaseHandler):
def __init__(self, extra_headers):
self.extra_headers = extra_headers
self.http_request = self._http_request
self.https_request = self._http_request
def _http_request(self, req):
for h, v in self.extra_headers.items():
if h.capitalize() not in req.headers:
req.add_header(h, v)
return req
class YoutubeDLSpy(urllib.request.BaseHandler):
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self):
self.reset()
def _http_response(self, request, response):
fetch = {
'url': request.full_url,
'method': request.get_method(),
'response_code': response.code,
'response_headers': response.headers,
}
self.fetches.append(fetch)
return response
http_response = https_response = _http_response
def reset(self):
self.fetches = []
def final_bounces(fetches, url):
"""
Resolves redirect chains in `fetches` and returns a list of fetches
representing the final redirect destinations of the given url. There could
be more than one if for example youtube-dl hit the same url with HEAD and
then GET requests.
"""
redirects = {}
for fetch in fetches:
# XXX check http status 301,302,303,307? check for "uri" header
# as well as "location"? see urllib.request.HTTPRedirectHandler
if 'location' in fetch['response_headers']:
redirects[fetch['url']] = fetch
final_url = url
while final_url in redirects:
fetch = redirects.pop(final_url)
final_url = urllib.parse.urljoin(
fetch['url'], fetch['response_headers']['location'])
final_bounces = []
for fetch in fetches:
if fetch['url'] == final_url:
final_bounces.append(fetch)
return final_bounces
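# Hedged example (illustrative only): given a hypothetical redirect chain as
# recorded by YoutubeDLSpy, final_bounces() follows the location headers and
# keeps only the fetches of the final destination url (the second one here).
def _example_final_bounces():
    fetches = [
        {'url': 'http://example.com/video', 'method': 'GET',
         'response_code': 302,
         'response_headers': {'location': 'http://example.com/video.mp4'}},
        {'url': 'http://example.com/video.mp4', 'method': 'GET',
         'response_code': 200, 'response_headers': {}},
    ]
    return final_bounces(fetches, 'http://example.com/video')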
def _build_youtube_dl(worker, destdir, site):
'''
Builds a yt-dlp `youtube_dl.YoutubeDL` for brozzling `site` with `worker`.
The `YoutubeDL` instance does a few special brozzler-specific things:
- keeps track of urls fetched using a `YoutubeDLSpy`
- periodically updates `site.last_claimed` in rethinkdb
- if brozzling through warcprox and downloading segmented videos (e.g.
HLS), pushes the stitched-up video created by yt-dlp/ffmpeg to warcprox
using a WARCPROX_WRITE_RECORD request
- some logging
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
destdir (str): where to save downloaded videos
site (brozzler.Site): the site we are brozzling
Returns:
a yt-dlp `youtube_dl.YoutubeDL` instance
'''
class _YoutubeDL(youtube_dl.YoutubeDL):
logger = logging.getLogger(__module__ + "." + __qualname__)
def urlopen(self, req):
try:
url = req.full_url
except AttributeError:
url = req
self.logger.debug('fetching %r', url)
return super().urlopen(req)
def add_default_extra_info(self, ie_result, ie, url):
# hook in some logging
super().add_default_extra_info(ie_result, ie, url)
if ie_result.get('_type') == 'playlist':
self.logger.info(
'extractor %r found playlist in %s', ie.IE_NAME, url)
if ie.IE_NAME in {'youtube:playlist', 'youtube:tab', 'soundcloud:user', 'instagram:user'}:
# At this point ie_result['entries'] is an iterator that
# will fetch more metadata from youtube to list all the
# videos. We unroll that iterator here partly because
# otherwise `process_ie_result()` will clobber it, and we
# use it later to extract the watch pages as outlinks.
try:
ie_result['entries_no_dl'] = list(ie_result['entries'])
except Exception as e:
self.logger.warning(
"failed to unroll ie_result['entries']? for %s, %s; exception %s",
ie.IE_NAME, url, e)
                        ie_result['entries_no_dl'] = []
ie_result['entries'] = []
self.logger.info(
'not downloading %s media files from this '
'playlist because we expect to capture them from '
'individual watch/track/detail pages',
len(ie_result['entries_no_dl']))
else:
self.logger.info(
'extractor %r found a download in %s', ie.IE_NAME, url)
def _push_stitched_up_vid_to_warcprox(self, site, info_dict):
# 220211 update: does yt-dlp supply content-type?
# XXX Don't know how to get the right content-type. Youtube-dl
# doesn't supply it. Sometimes (with --hls-prefer-native)
# youtube-dl produces a stitched-up video that /usr/bin/file fails
# to identify (says "application/octet-stream"). `ffprobe` doesn't
# give us a mimetype.
if info_dict.get('ext') == 'mp4':
mimetype = 'video/mp4'
else:
try:
import magic
mimetype = magic.from_file(info_dict['filepath'], mime=True)
except ImportError as e:
mimetype = 'video/%s' % info_dict['ext']
self.logger.warning(
'guessing mimetype %s because %r', mimetype, e)
url = 'youtube-dl:%05d:%s' % (
info_dict.get('playlist_index') or 1,
info_dict['webpage_url'])
size = os.path.getsize(info_dict['filepath'])
self.logger.info(
'pushing %r video stitched-up as %s (%s bytes) to '
'warcprox at %s with url %s', info_dict['format'],
mimetype, size, worker._proxy_for(site), url)
with open(info_dict['filepath'], 'rb') as f:
# include content-length header to avoid chunked
# transfer, which warcprox currently rejects
extra_headers = dict(site.extra_headers())
extra_headers['content-length'] = size
request, response = worker._warcprox_write_record(
warcprox_address=worker._proxy_for(site), url=url,
warc_type='resource', content_type=mimetype, payload=f,
extra_headers=extra_headers)
# consulted by _remember_videos()
ydl.stitch_ups.append({
'url': url,
'response_code': response.code,
'content-type': mimetype,
'content-length': size,
})
def maybe_heartbeat_site_last_claimed(*args, **kwargs):
# in case yt-dlp takes a long time, heartbeat site.last_claimed
# to prevent another brozzler-worker from claiming the site
try:
if site.rr and doublethink.utcnow() - site.last_claimed > datetime.timedelta(minutes=worker.SITE_SESSION_MINUTES):
worker.logger.debug(
'heartbeating site.last_claimed to prevent another '
'brozzler-worker claiming this site id=%r', site.id)
site.last_claimed = doublethink.utcnow()
site.save()
except:
worker.logger.debug(
'problem heartbeating site.last_claimed site id=%r',
site.id, exc_info=True)
def ydl_postprocess_hook(d):
if d['status'] == 'finished':
print('[ydl_postprocess_hook] Done postprocessing')
if worker._using_warcprox(site):
_YoutubeDL._push_stitched_up_vid_to_warcprox(_YoutubeDL, site, d['info_dict'])
ydl_opts = {
"outtmpl": "{}/ydl%(autonumber)s.out".format(destdir),
"retries": 1,
"nocheckcertificate": True,
"noplaylist": True,
"noprogress": True,
"nopart": True,
"no_color": True,
"progress_hooks": [maybe_heartbeat_site_last_claimed],
"postprocessor_hooks": [ydl_postprocess_hook],
# https://github.com/yt-dlp/yt-dlp#format-selection
# "By default, yt-dlp tries to download the best available quality..."
# https://github.com/yt-dlp/yt-dlp#sorting-formats
# "You can change the criteria for being considered the best by using -S (--format-sort)...."
# "vext: Video Extension (mp4 > webm > flv > other). If --prefer-free-formats is used, webm is preferred."
# "aext: Audio Extension (m4a > aac > mp3 > ogg > opus > webm > other)."
# "If --prefer-free-formats is used, the order changes to opus > ogg > webm > m4a > mp3 > aac."
# "ext: Equivalent to vext,aext"
"format_sort": ["ext"],
# --cache-dir local or...
"cache_dir": False,
### we do our own logging
# "logger": logging.getLogger("youtube_dl"),
"verbose": True,
"quiet": False,
}
if worker._proxy_for(site):
ydl_opts["proxy"] = "http://{}".format(worker._proxy_for(site))
ydl = _YoutubeDL(ydl_opts)
if site.extra_headers():
ydl._opener.add_handler(ExtraHeaderAdder(site.extra_headers()))
ydl.fetch_spy = YoutubeDLSpy()
ydl.stitch_ups = []
ydl._opener.add_handler(ydl.fetch_spy)
return ydl
def _remember_videos(page, fetches, stitch_ups=None):
'''
Saves info about videos captured by yt-dlp in `page.videos`.
'''
if not 'videos' in page:
page.videos = []
for fetch in fetches or []:
content_type = fetch['response_headers'].get_content_type()
if (content_type.startswith('video/')
# skip manifests of DASH segmented video -
# see https://github.com/internetarchive/brozzler/pull/70
and content_type != 'video/vnd.mpeg.dash.mpd'
and fetch['method'] == 'GET'
and fetch['response_code'] in (200, 206)):
video = {
'blame': 'youtube-dl',
'url': fetch['url'],
'response_code': fetch['response_code'],
'content-type': content_type,
}
if 'content-length' in fetch['response_headers']:
video['content-length'] = int(
fetch['response_headers']['content-length'])
if 'content-range' in fetch['response_headers']:
video['content-range'] = fetch[
'response_headers']['content-range']
logging.debug('embedded video %s', video)
page.videos.append(video)
for stitch_up in stitch_ups or []:
if stitch_up['content-type'].startswith('video/'):
video = {
'blame': 'youtube-dl',
'url': stitch_up['url'],
'response_code': stitch_up['response_code'],
'content-type': stitch_up['content-type'],
'content-length': stitch_up['content-length'],
}
logging.debug('embedded video %s', video)
page.videos.append(video)
def _try_youtube_dl(worker, ydl, site, page):
try:
logging.info("trying yt-dlp on %s", page)
with brozzler.thread_accept_exceptions():
# we do whatwg canonicalization here to avoid "<urlopen error
# no host given>" resulting in ProxyError
# needs automated test
# and yt-dlp needs sanitize_info for extract_info
ie_result = ydl.sanitize_info(ydl.extract_info(str(urlcanon.whatwg(page.url))))
_remember_videos(page, ydl.fetch_spy.fetches, ydl.stitch_ups)
if worker._using_warcprox(site):
info_json = json.dumps(ie_result, sort_keys=True, indent=4)
logging.info(
"sending WARCPROX_WRITE_RECORD request to warcprox "
"with yt-dlp json for %s", page)
worker._warcprox_write_record(
warcprox_address=worker._proxy_for(site),
url="youtube-dl:%s" % str(urlcanon.semantic(page.url)),
warc_type="metadata",
content_type="application/vnd.youtube-dl_formats+json;charset=utf-8",
payload=info_json.encode("utf-8"),
extra_headers=site.extra_headers())
return ie_result
except brozzler.ShutdownRequested as e:
raise
except Exception as e:
if hasattr(e, "exc_info") and e.exc_info[0] == youtube_dl.utils.UnsupportedError:
return None
elif (hasattr(e, "exc_info")
and e.exc_info[0] == urllib.error.HTTPError
and hasattr(e.exc_info[1], "code")
and e.exc_info[1].code == 420):
raise brozzler.ReachedLimit(e.exc_info[1])
elif (hasattr(e, 'exc_info')
and e.exc_info[0] == urllib.error.URLError
and worker._proxy_for(site)):
# connection problem when using a proxy == proxy error (XXX?)
raise brozzler.ProxyError(
'yt-dlp hit apparent proxy error from '
'%s' % page.url) from e
else:
raise
def do_youtube_dl(worker, site, page):
'''
Runs yt-dlp configured for `worker` and `site` to download videos from
`page`.
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
site (brozzler.Site): the site we are brozzling
page (brozzler.Page): the page we are brozzling
Returns:
tuple with two entries:
`list` of `dict`: with info about urls fetched:
[{
'url': ...,
'method': ...,
'response_code': ...,
'response_headers': ...,
}, ...]
`list` of `str`: outlink urls
'''
with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
ydl = _build_youtube_dl(worker, tempdir, site)
ie_result = _try_youtube_dl(worker, ydl, site, page)
outlinks = set()
if ie_result and ie_result.get('extractor') == 'youtube:playlist':
# youtube watch pages as outlinks
outlinks = {'https://www.youtube.com/watch?v=%s' % e['id']
for e in ie_result.get('entries_no_dl', [])}
# any outlinks for other cases?
return ydl.fetch_spy.fetches, outlinks
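# Hedged example (not part of the upstream module): sketch of how a brozzler
# worker might call do_youtube_dl() and use what it returns. worker, site and
# page are assumed to be the usual brozzler objects named in the docstring.
def _example_do_youtube_dl(worker, site, page):
    fetches, outlinks = do_youtube_dl(worker, site, page)
    video_urls = [f['url'] for f in fetches
                  if f['response_code'] in (200, 206)]
    return video_urls, outlinks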
# $Id$
"""Internet Protocol, version 6."""
import dpkt
class IP6(dpkt.Packet):
__hdr__ = (
('v_fc_flow', 'I', 0x60000000L),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
# XXX - to be shared with IP. We cannot refer to the ip module
# right now because ip.__load_protos() expects the IP6 class to be
# defined.
_protosw = None
def _get_v(self):
return self.v_fc_flow >> 28
def _set_v(self, v):
self.v_fc_flow = (self.v_fc_flow & ~0xf0000000L) | (v << 28)
v = property(_get_v, _set_v)
def _get_fc(self):
return (self.v_fc_flow >> 20) & 0xff
def _set_fc(self, v):
self.v_fc_flow = (self.v_fc_flow & ~0xff00000L) | (v << 20)
fc = property(_get_fc, _set_fc)
def _get_flow(self):
return self.v_fc_flow & 0xfffff
def _set_flow(self, v):
self.v_fc_flow = (self.v_fc_flow & ~0xfffff) | (v & 0xfffff)
flow = property(_get_flow, _set_flow)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = dict(((i, None) for i in ext_hdrs))
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next = self.nxt
while (next in ext_hdrs):
ext = ext_hdrs_cls[next](buf)
self.extension_hdrs[next] = ext
buf = buf[ext.length:]
next = ext.nxt
# set the payload protocol id
setattr(self, 'p', next)
try:
self.data = self._protosw[next](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
"""
Output extension headers in order defined in RFC1883 (except dest opts)
"""
header_str = ""
for hdr in ext_hdrs:
if not self.extension_hdrs[hdr] is None:
header_str += str(self.extension_hdrs[hdr])
return header_str
def __str__(self):
if (self.nxt == 6 or self.nxt == 17 or self.nxt == 58) and \
not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = str(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + str(self.data)
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
set_proto = classmethod(set_proto)
def get_proto(cls, p):
return cls._protosw[p]
get_proto = classmethod(get_proto)
import ip
# We are most likely still in the middle of ip.__load_protos() which
# implicitly loads this module through __import__(), so the content of
# ip.IP._protosw is still incomplete at the moment. By sharing the
# same dictionary by reference as opposed to making a copy, when
# ip.__load_protos() finishes, we will also automatically get the most
# up-to-date dictionary.
IP6._protosw = ip.IP._protosw
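# Hedged illustration (not part of dpkt): because the dictionary is shared by
# reference, protocol lookups resolve identically through IP and IP6 once
# ip.__load_protos() has finished. Protocol 6 (TCP) is used as an example.
def _example_shared_protosw():
    assert IP6._protosw is ip.IP._protosw           # literally the same dict
    assert IP6.get_proto(6) is ip.IP.get_proto(6)   # same handler class for TCP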
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0)  # option data length in 8-octet units (ignoring the first 8 octets), so len 0 == 64-bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
setattr(self, 'length', (self.len + 1) * 8)
options = []
index = 0
while (index < self.length - 2):
opt_type = ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
                continue
opt_length = ord(self.data[index + 1])
if opt_type == 1: # PADN option
                # PADN: opt_length bytes of padding data follow the 2-byte type/length
index += opt_length + 2
continue
options.append({'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
            # skip the 2-byte type/length plus the option data to reach the next option
index += opt_length + 2
setattr(self, 'options', options)
class IP6HopOptsHeader(IP6OptsHeader): pass
class IP6DstOptsHeader(IP6OptsHeader): pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0),  # extension data length in 8-octet units (ignoring the first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
def _get_sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
def _set_sl_bits(self, v):
        self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)  # mask matches the 24-bit bitmap read in _get_sl_bits
sl_bits = property(_get_sl_bits, _set_sl_bits)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len / 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
setattr(self, 'addresses', addresses)
setattr(self, 'length', self.len * 8 + 8)
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
setattr(self, 'length', self.__hdr_len__)
def _get_frag_off(self):
return self.frag_off_resv_m >> 3
def _set_frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
frag_off = property(_get_frag_off, _set_frag_off)
def _get_m_flag(self):
return self.frag_off_resv_m & 1
def _set_m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
m_flag = property(_get_m_flag, _set_m_flag)
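# Hedged example (illustrative only): frag_off and m_flag are packed into the
# 16-bit frag_off_resv_m field; the byte string matches test_IP6FragmentHeader.
def _example_fragment_header_fields():
    fh = IP6FragmentHeader('\x06\xee\xff\xfb\x00\x00\xff\xff')
    assert fh.frag_off == 8191 and fh.m_flag == 1
    fh.frag_off = 1
    assert fh.frag_off == 1 and fh.m_flag == 1   # offset updated, M flag untouched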
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
setattr(self, 'length', (self.len + 2) * 4)
setattr(self, 'auth_data', self.data[:(self.len - 1) * 4])
class IP6ESPHeader(IP6ExtensionHeader):
def unpack(self, buf):
raise NotImplementedError("ESP extension headers are not supported.")
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP, ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
if __name__ == '__main__':
import unittest
class IP6TestCase(unittest.TestCase):
def test_IP6(self):
s = '`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
ip = IP6(s)
#print `ip`
ip.data.sum = 0
s2 = str(ip)
ip2 = IP6(s)
#print `ip2`
assert(s == s2)
def test_IP6RoutingHeader(self):
s = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
ip = IP6(s)
s2 = str(ip)
# 43 is Routing header id
assert(len(ip.extension_hdrs[43].addresses) == 2)
assert(ip.tcp)
assert(s == s2)
def test_IP6FragmentHeader(self):
s = '\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
s2 = str(fh)
assert(fh.nxt == 6)
assert(fh.id == 65535)
assert(fh.frag_off == 8191)
assert(fh.m_flag == 1)
def test_IP6OptionsHeader(self):
s = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
options = IP6OptsHeader(s).options
assert(len(options) == 3)
def test_IP6AHHeader(self):
s = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert(ah.length == 24)
assert(ah.auth_data == 'xxxxxxxx')
assert(ah.spi == 0x2020202)
assert(ah.seq == 0x1010101)
def test_IP6ExtensionHeaders(self):
p = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
ip = IP6(p)
o = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
options = IP6HopOptsHeader(o)
ip.extension_hdrs[0] = options
fh = '\x06\xee\xff\xfb\x00\x00\xff\xff'
ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ip.extension_hdrs[51] = IP6AHHeader(ah)
do = ';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert(len([k for k in ip.extension_hdrs if (not ip.extension_hdrs[k] is None)]) == 5)
unittest.main()
|
|
# Copyright 2019 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for Frame protos."""
from typing import Dict, List, Tuple, Optional
import numpy as np
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
RangeImages = Dict['dataset_pb2.LaserName.Name', List[dataset_pb2.MatrixFloat]]
CameraProjections = Dict['dataset_pb2.LaserName.Name',
List[dataset_pb2.MatrixInt32]]
SegmentationLabels = Dict['dataset_pb2.LaserName.Name',
List[dataset_pb2.MatrixInt32]]
ParsedFrame = Tuple[RangeImages, CameraProjections, SegmentationLabels,
Optional[dataset_pb2.MatrixFloat]]
def parse_range_image_and_camera_projection(
frame: dataset_pb2.Frame) -> ParsedFrame:
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
seg_labels: segmentation labels, a dict of {laser_name,
[seg_label_first_return, seg_label_second_return]}
range_image_top_pose: range image pixel pose for top lidar.
"""
range_images = {}
camera_projections = {}
seg_labels = {}
range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name] = [ri]
if laser.name == dataset_pb2.LaserName.TOP:
range_image_top_pose_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = dataset_pb2.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return1.camera_projection_compressed, 'ZLIB')
cp = dataset_pb2.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name] = [cp]
if len(laser.ri_return1.segmentation_label_compressed) > 0: # pylint: disable=g-explicit-length-test
seg_label_str_tensor = tf.io.decode_compressed(
laser.ri_return1.segmentation_label_compressed, 'ZLIB')
seg_label = dataset_pb2.MatrixInt32()
seg_label.ParseFromString(bytearray(seg_label_str_tensor.numpy()))
seg_labels[laser.name] = [seg_label]
if len(laser.ri_return2.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name].append(ri)
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return2.camera_projection_compressed, 'ZLIB')
cp = dataset_pb2.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name].append(cp)
if len(laser.ri_return2.segmentation_label_compressed) > 0: # pylint: disable=g-explicit-length-test
seg_label_str_tensor = tf.io.decode_compressed(
laser.ri_return2.segmentation_label_compressed, 'ZLIB')
seg_label = dataset_pb2.MatrixInt32()
seg_label.ParseFromString(bytearray(seg_label_str_tensor.numpy()))
seg_labels[laser.name].append(seg_label)
return range_images, camera_projections, seg_labels, range_image_top_pose
def convert_range_image_to_cartesian(frame,
range_images,
range_image_top_pose,
ri_index=0,
keep_polar_features=False):
"""Convert range images from polar coordinates to Cartesian coordinates.
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return,
range_image_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
keep_polar_features: If true, keep the features from the polar range image
(i.e. range, intensity, and elongation) as the first features in the
output range image.
Returns:
dict of {laser_name, (H, W, D)} range images in Cartesian coordinates. D
will be 3 if keep_polar_features is False (x, y, z) and 6 if
keep_polar_features is True (range, intensity, elongation, x, y, z).
"""
cartesian_range_images = {}
frame_pose = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in frame.context.laser_calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == dataset_pb2.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(value=beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
if keep_polar_features:
# If we want to keep the polar coordinate features of range, intensity,
# and elongation, concatenate them to be the initial dimensions of the
# returned Cartesian range image.
range_image_cartesian = tf.concat(
[range_image_tensor[..., 0:3], range_image_cartesian], axis=-1)
cartesian_range_images[c.name] = range_image_cartesian
return cartesian_range_images
def convert_range_image_to_point_cloud(frame,
range_images,
camera_projections,
range_image_top_pose,
ri_index=0,
keep_polar_features=False):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return,
range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
keep_polar_features: If true, keep the features from the polar range image
(i.e. range, intensity, and elongation) as the first features in the
output range image.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
      (NOTE: Will be {[N, 6]} if keep_polar_features is true.)
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
"""
calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
points = []
cp_points = []
cartesian_range_images = convert_range_image_to_cartesian(
frame, range_images, range_image_top_pose, ri_index, keep_polar_features)
for c in calibrations:
range_image = range_images[c.name][ri_index]
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = cartesian_range_images[c.name]
points_tensor = tf.gather_nd(range_image_cartesian,
tf.compat.v1.where(range_image_mask))
cp = camera_projections[c.name][ri_index]
cp_tensor = tf.reshape(tf.convert_to_tensor(value=cp.data), cp.shape.dims)
cp_points_tensor = tf.gather_nd(cp_tensor,
tf.compat.v1.where(range_image_mask))
points.append(points_tensor.numpy())
cp_points.append(cp_points_tensor.numpy())
return points, cp_points
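# Hedged usage sketch (an assumption, not part of the Waymo utils): the
# per-lidar lists returned above are commonly concatenated into one point
# cloud; requires eager mode, like the converters themselves.
def _example_merged_point_cloud(frame):
    range_images, camera_projections, _, range_image_top_pose = (
        parse_range_image_and_camera_projection(frame))
    points, cp_points = convert_range_image_to_point_cloud(
        frame, range_images, camera_projections, range_image_top_pose)
    return np.concatenate(points, axis=0), np.concatenate(cp_points, axis=0)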
def convert_frame_to_dict(frame: dataset_pb2.Frame) -> Dict[str, np.ndarray]:
"""Convert the frame proto into a dict of numpy arrays.
The keys, shapes, and data types are:
POSE: 4x4 float32 array
TIMESTAMP: int64 scalar
For each lidar:
<LIDAR_NAME>_BEAM_INCLINATION: H float32 array
<LIDAR_NAME>_LIDAR_EXTRINSIC: 4x4 float32 array
<LIDAR_NAME>_RANGE_IMAGE_FIRST_RETURN: HxWx6 float32 array
<LIDAR_NAME>_RANGE_IMAGE_SECOND_RETURN: HxWx6 float32 array
<LIDAR_NAME>_CAM_PROJ_FIRST_RETURN: HxWx6 int64 array
    <LIDAR_NAME>_CAM_PROJ_SECOND_RETURN: HxWx6 int64 array
(top lidar only) TOP_RANGE_IMAGE_POSE: HxWx6 float32 array
For each camera:
<CAMERA_NAME>_IMAGE: HxWx3 uint8 array
<CAMERA_NAME>_INTRINSIC: 9 float32 array
<CAMERA_NAME>_EXTRINSIC: 4x4 float32 array
<CAMERA_NAME>_WIDTH: int64 scalar
<CAMERA_NAME>_HEIGHT: int64 scalar
<CAMERA_NAME>_SDC_VELOCITY: 6 float32 array
<CAMERA_NAME>_POSE: 4x4 float32 array
<CAMERA_NAME>_POSE_TIMESTAMP: float32 scalar
<CAMERA_NAME>_ROLLING_SHUTTER_DURATION: float32 scalar
<CAMERA_NAME>_ROLLING_SHUTTER_DIRECTION: int64 scalar
<CAMERA_NAME>_CAMERA_TRIGGER_TIME: float32 scalar
<CAMERA_NAME>_CAMERA_READOUT_DONE_TIME: float32 scalar
NOTE: This function only works in eager mode for now.
See the LaserName.Name and CameraName.Name enums in dataset.proto for the
valid lidar and camera name strings that will be present in the returned
dictionaries.
Args:
frame: open dataset frame
Returns:
Dict from string field name to numpy ndarray.
"""
range_images, camera_projection_protos, _, range_image_top_pose = (
parse_range_image_and_camera_projection(frame))
first_return_cartesian_range_images = convert_range_image_to_cartesian(
frame,
range_images,
range_image_top_pose,
ri_index=0,
keep_polar_features=True)
second_return_cartesian_range_images = convert_range_image_to_cartesian(
frame,
range_images,
range_image_top_pose,
ri_index=1,
keep_polar_features=True)
data_dict = {}
# Save the beam inclinations, extrinsic matrices, first/second return range
# images, and first/second return camera projections for each lidar.
for c in frame.context.laser_calibrations:
laser_name_str = dataset_pb2.LaserName.Name.Name(c.name)
beam_inclination_key = f'{laser_name_str}_BEAM_INCLINATION'
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
data_dict[beam_inclination_key] = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_images[c.name][0].shape.dims[0]).numpy()
else:
data_dict[beam_inclination_key] = np.array(c.beam_inclinations,
np.float32)
data_dict[f'{laser_name_str}_LIDAR_EXTRINSIC'] = np.reshape(
np.array(c.extrinsic.transform, np.float32), [4, 4])
data_dict[f'{laser_name_str}_RANGE_IMAGE_FIRST_RETURN'] = (
first_return_cartesian_range_images[c.name].numpy())
data_dict[f'{laser_name_str}_RANGE_IMAGE_SECOND_RETURN'] = (
second_return_cartesian_range_images[c.name].numpy())
first_return_cp = camera_projection_protos[c.name][0]
data_dict[f'{laser_name_str}_CAM_PROJ_FIRST_RETURN'] = np.reshape(
np.array(first_return_cp.data), first_return_cp.shape.dims)
second_return_cp = camera_projection_protos[c.name][1]
data_dict[f'{laser_name_str}_CAM_PROJ_SECOND_RETURN'] = np.reshape(
np.array(second_return_cp.data), second_return_cp.shape.dims)
# Save the H x W x 3 RGB image for each camera, extracted from JPEG.
for im in frame.images:
cam_name_str = dataset_pb2.CameraName.Name.Name(im.name)
data_dict[f'{cam_name_str}_IMAGE'] = tf.io.decode_jpeg(im.image).numpy()
data_dict[f'{cam_name_str}_SDC_VELOCITY'] = np.array([
im.velocity.v_x, im.velocity.v_y, im.velocity.v_z, im.velocity.w_x,
im.velocity.w_y, im.velocity.w_z
], np.float32)
data_dict[f'{cam_name_str}_POSE'] = np.reshape(
np.array(im.pose.transform, np.float32), (4, 4))
data_dict[f'{cam_name_str}_POSE_TIMESTAMP'] = np.array(
im.pose_timestamp, np.float32)
data_dict[f'{cam_name_str}_ROLLING_SHUTTER_DURATION'] = np.array(im.shutter)
data_dict[f'{cam_name_str}_CAMERA_TRIGGER_TIME'] = np.array(
im.camera_trigger_time)
data_dict[f'{cam_name_str}_CAMERA_READOUT_DONE_TIME'] = np.array(
im.camera_readout_done_time)
# Save the intrinsics, 4x4 extrinsic matrix, width, and height of each camera.
for c in frame.context.camera_calibrations:
cam_name_str = dataset_pb2.CameraName.Name.Name(c.name)
data_dict[f'{cam_name_str}_INTRINSIC'] = np.array(c.intrinsic, np.float32)
data_dict[f'{cam_name_str}_EXTRINSIC'] = np.reshape(
np.array(c.extrinsic.transform, np.float32), [4, 4])
data_dict[f'{cam_name_str}_WIDTH'] = np.array(c.width)
data_dict[f'{cam_name_str}_HEIGHT'] = np.array(c.height)
data_dict[f'{cam_name_str}_ROLLING_SHUTTER_DIRECTION'] = np.array(
c.rolling_shutter_direction)
# Save the range image pixel pose for the top lidar.
data_dict['TOP_RANGE_IMAGE_POSE'] = np.reshape(
np.array(range_image_top_pose.data, np.float32),
range_image_top_pose.shape.dims)
data_dict['POSE'] = np.reshape(
np.array(frame.pose.transform, np.float32), (4, 4))
data_dict['TIMESTAMP'] = np.array(frame.timestamp_micros)
return data_dict
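# Hedged example (not part of this module): iterating frames from a Waymo
# segment TFRecord in eager mode and converting each one with
# convert_frame_to_dict(). The path below is a placeholder.
def _example_frames_to_dicts(tfrecord_path='/path/to/segment.tfrecord'):
    dataset = tf.data.TFRecordDataset(tfrecord_path, compression_type='')
    for data in dataset:
        frame = dataset_pb2.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        yield convert_frame_to_dict(frame)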
|
|
import os
import logging
import requests
import json
from programy.utils.license.keys import LicenseKeys
class NewsArticle(object):
def __init__(self):
self.title = None
self.description = None
self.published_at = None
self.author = None
self.url = None
self.url_to_image = None
def _get_json_attribute(self, data, name, def_value=None):
if name in data:
return data[name]
else:
logging.debug("Attribute [%s] missing from New API Article data"%name)
return def_value
def parse_json(self, data):
self.title = self._get_json_attribute(data, "title")
self.description = self._get_json_attribute(data, "description")
self.published_at = self._get_json_attribute(data, "publishedAt")
self.author = self._get_json_attribute(data, "author")
self.url = self._get_json_attribute(data, "url")
self.url_to_image = self._get_json_attribute(data, "urlToImage")
def to_json(self):
data = {}
data["title"] = self.title
data["description"] = self.description
data["publishedAt"] = self.published_at
data["author"] = self.author
data["url"] = self.url
data["urlToImage"] = self.url_to_image
return data
# https://newsapi.org/bbc-news-api
class NewsAPI(object):
BASE_URL = "https://newsapi.org/v1/articles?source=%s&sortBy=%s&apiKey=%s"
# Single news feeds
ABC_NEWS_AU = "abc-news-au"
AL_JAZEERA_ENGLISH = "al-jazeera-english"
ARS_TECHNICA = "ars-technica"
ASSOCIATED_PRESS = "associated-press"
BBC_NEWS = "bbc-news"
BBC_SPORT = "bbc-sport"
BLOOMBERG = "bloomberg"
BUSINESS_INSIDER = "business-insider"
BUSINESS_INSIDER_UK = "business-insider-uk"
BUZZFEED = "buzzfeed"
CNBC = "cnbc"
CNN = "cnn"
DAILY_MAIL = "daily-mail"
ENGADGET = "engadget"
ENTERTAINMENT_WEEKLY = "entertainment-weekly"
ESPN = "espn"
ESPN_CRIC_INFO = "espn-cric-info"
FINANCIAL_TIMES = "financial-times"
FOOTBALL_ITALIA = "football-italia"
FORTUNE = "fortune"
FOUR_FOUR_TWO = "four-four-two"
FOX_SPORTS = "fox-sports"
GOOGLE_NEWS = "google-news"
HACKER_NEWS = "hacker-news"
IGN = "ign"
INDEPENDENT = "independent"
MASHABLE = "mashable"
METRO = "metro"
MIRROR = "mirror"
MTV_NEWS = "mtv-news"
MTV_NEWS_UK = "mtv-news-uk"
NATIONAL_GEOGRAPHIC = "national-geographic"
NEW_SCIENTIST = "new-scientist"
NEWSWEEK = "newsweek"
NEW_YORK_MAGAZINE = "new-york-magazine"
NFL_NEWS = "nfl-news"
POLYGON = "polygon"
RECODE = "recode"
REDDIT_R_ALL = "reddit-r-all"
REUTERS = "reuters"
TALKSPORT = "talksport"
TECHCRUNCH = "techcrunch"
TECHRADAR = "techradar"
THE_ECONOMIST = "the-economist"
THE_GUARDIAN_AU = "the-guardian-au"
THE_GUARDIAN_UK = "the-guardian-uk"
THE_HUFFINGTON_POST = "the-huffington-post"
THE_NEW_YORK_TIMES = "the-new-york-times"
THE_NEXT_WEB = "the-next-web"
THE_SPORT_BIBLE = "the-sport-bible"
THE_TELEGRAPH = "the-telegraph"
THE_VERGE = "the-verge"
THE_WALL_STREET_JOURNAL = "the-wall-street-journal"
THE_WASHINGTON_POST = "the-washington-post"
TIME = "time"
USA_TODAY = "usa-today"
# Collections
BUSINESS = "business"
ENTERTAINMENT = " entertainment"
GAMING = "gaming"
MUSIC = "music"
SCIENCE_AND_NATURE = "science_and_nature"
SPORT = "sport"
TECHNOLOGY = "technology"
UK_NEWS = "uk_news"
UK_NEWSPAPERS = "uk_newspapers"
def __init__(self, license_keys):
self.function_mapping = {
NewsAPI.ABC_NEWS_AU: NewsAPI.abc_news_au,
NewsAPI.AL_JAZEERA_ENGLISH: NewsAPI.al_jazeera_english,
NewsAPI.ARS_TECHNICA: NewsAPI.ars_technica,
NewsAPI.ASSOCIATED_PRESS: NewsAPI.associated_press,
NewsAPI.BBC_NEWS: NewsAPI.bbc_news,
NewsAPI.BBC_SPORT: NewsAPI.bbc_sport,
NewsAPI.BLOOMBERG: NewsAPI.bloomberg,
NewsAPI.BUSINESS_INSIDER: NewsAPI.business_insider,
NewsAPI.BUSINESS_INSIDER_UK: NewsAPI.business_insider_uk,
NewsAPI.BUZZFEED: NewsAPI.buzzfeed,
NewsAPI.CNBC: NewsAPI.cnbc,
NewsAPI.CNN: NewsAPI.cnn,
NewsAPI.DAILY_MAIL: NewsAPI.daily_mail,
NewsAPI.ENGADGET: NewsAPI.engadget,
NewsAPI.ENTERTAINMENT_WEEKLY: NewsAPI.entertainment_weekly,
NewsAPI.ESPN: NewsAPI.espn,
NewsAPI.ESPN_CRIC_INFO: NewsAPI.espn_cric_info,
NewsAPI.FINANCIAL_TIMES: NewsAPI.financial_times,
NewsAPI.FOOTBALL_ITALIA: NewsAPI.football_italia,
NewsAPI.FORTUNE: NewsAPI.fortune,
NewsAPI.FOUR_FOUR_TWO: NewsAPI.four_four_two,
NewsAPI.FOX_SPORTS: NewsAPI.fox_sports,
NewsAPI.GOOGLE_NEWS: NewsAPI.google_news,
NewsAPI.HACKER_NEWS: NewsAPI.hacker_news,
NewsAPI.IGN: NewsAPI.ign,
NewsAPI.INDEPENDENT: NewsAPI.independent,
NewsAPI.MASHABLE: NewsAPI.mashable,
NewsAPI.METRO: NewsAPI.metro,
NewsAPI.MIRROR: NewsAPI.mirror,
NewsAPI.MTV_NEWS: NewsAPI.mtv_news,
NewsAPI.MTV_NEWS_UK: NewsAPI.mtv_news_uk,
NewsAPI.NATIONAL_GEOGRAPHIC: NewsAPI.national_geographic,
NewsAPI.NEW_SCIENTIST: NewsAPI.new_scientist,
NewsAPI.NEWSWEEK: NewsAPI.newsweek,
NewsAPI.NEW_YORK_MAGAZINE: NewsAPI.new_york_magazine,
NewsAPI.NFL_NEWS: NewsAPI.nfl_news,
NewsAPI.POLYGON: NewsAPI.polygon,
NewsAPI.RECODE: NewsAPI.recode,
NewsAPI.REDDIT_R_ALL: NewsAPI.reddit,
NewsAPI.REUTERS: NewsAPI.reuters,
NewsAPI.TALKSPORT: NewsAPI.talksport,
NewsAPI.TECHCRUNCH: NewsAPI.techcrunch,
NewsAPI.TECHRADAR: NewsAPI.techradar,
NewsAPI.THE_ECONOMIST: NewsAPI.the_economist,
NewsAPI.THE_GUARDIAN_AU: NewsAPI.the_guardian_au,
NewsAPI.THE_GUARDIAN_UK: NewsAPI.the_guardian_uk,
NewsAPI.THE_HUFFINGTON_POST: NewsAPI.the_huffington_post,
NewsAPI.THE_NEW_YORK_TIMES: NewsAPI.the_new_york_times,
NewsAPI.THE_NEXT_WEB: NewsAPI.the_next_web,
NewsAPI.THE_SPORT_BIBLE: NewsAPI.the_sport_bible,
NewsAPI.THE_TELEGRAPH: NewsAPI.the_telegraph,
NewsAPI.THE_VERGE: NewsAPI.the_verge,
NewsAPI.THE_WALL_STREET_JOURNAL: NewsAPI.the_wall_street_journal,
NewsAPI.THE_WASHINGTON_POST: NewsAPI.the_washington_post,
NewsAPI.TIME: NewsAPI.time,
NewsAPI.USA_TODAY: NewsAPI.usa_today,
NewsAPI.BUSINESS: NewsAPI.business,
            NewsAPI.ENTERTAINMENT: NewsAPI.entertainment,
NewsAPI.GAMING: NewsAPI.gaming,
NewsAPI.MUSIC: NewsAPI.music,
NewsAPI.SCIENCE_AND_NATURE: NewsAPI.science_and_nature,
NewsAPI.SPORT: NewsAPI.sport,
NewsAPI.TECHNOLOGY: NewsAPI.technology,
NewsAPI.UK_NEWS: NewsAPI.uk_news,
NewsAPI.UK_NEWSPAPERS: NewsAPI.uk_newspapers,
}
if license_keys.has_key('NEWSAPI_API_KEY'):
self.api_key = license_keys.get_key('NEWSAPI_API_KEY')
else:
raise Exception ("No valid license key METOFFICE_API_KEY found")
@staticmethod
def _format_url(service, api_key, sort_by="top"):
return NewsAPI.BASE_URL%(service, sort_by, api_key)
@staticmethod
def _get_data(url_str, api_key, max_articles, sort, reverse):
url = NewsAPI._format_url(url_str, api_key)
return NewsAPI._get_news_feed_articles(url, max_articles, sort, reverse)
@staticmethod
def _get_news_feed_articles(url, max_articles, sort, reverse):
logging.debug("News API URL: [%s]"%url)
response = requests.get(url)
articles = []
if response.status_code == 200:
header_splits = response.headers['content-type'].split(";")
if header_splits[0] == 'application/json':
json_data = response.json()
for article_data in json_data['articles']:
article = NewsArticle()
article.parse_json(article_data)
articles.append(article)
logging.debug(article.description)
if sort is True:
logging.debug("Sorting articles,, reverse=%s" % str(reverse))
articles.sort(key=lambda article: article.published_at, reverse=reverse)
if max_articles != 0:
logging.debug("Returning max_articles %d articles" % max_articles)
articles = articles[:max_articles]
else:
logging.debug("Returning all articles")
else:
logging.error("NewsAPI request none JSON object")
else:
logging.error("NewsAPI request returned error code %d"%response.status_code)
return articles
@staticmethod
def abc_news_au(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ABC_NEWS_AU, api_key, max_articles, sort, reverse)
@staticmethod
def al_jazeera_english(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.AL_JAZEERA_ENGLISH, api_key, max_articles, sort, reverse)
@staticmethod
def ars_technica(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ARS_TECHNICA, api_key, max_articles, sort, reverse)
@staticmethod
def associated_press(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ASSOCIATED_PRESS, api_key, max_articles, sort, reverse)
@staticmethod
def bbc_news(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BBC_NEWS, api_key, max_articles, sort, reverse)
@staticmethod
def bbc_sport(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BBC_SPORT, api_key, max_articles, sort, reverse)
@staticmethod
def bloomberg(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BLOOMBERG, api_key, max_articles, sort, reverse)
@staticmethod
def business_insider(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUSINESS_INSIDER, api_key, max_articles, sort, reverse)
@staticmethod
def business_insider_uk(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUSINESS_INSIDER_UK, api_key, max_articles, sort, reverse)
@staticmethod
def buzzfeed(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUZZFEED, api_key, max_articles, sort, reverse)
@staticmethod
def cnbc(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.CNBC, api_key, max_articles, sort, reverse)
@staticmethod
def cnn(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.CNN, api_key, max_articles, sort, reverse)
@staticmethod
def daily_mail(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.DAILY_MAIL, api_key, max_articles, sort, reverse)
@staticmethod
def engadget(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ENGADGET, api_key, max_articles, sort, reverse)
@staticmethod
def entertainment_weekly(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ENTERTAINMENT_WEEKLY, api_key, max_articles, sort, reverse)
@staticmethod
def espn(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ESPN, api_key, max_articles, sort, reverse)
@staticmethod
def espn_cric_info(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.ESPN_CRIC_INFO, api_key, max_articles, sort, reverse)
@staticmethod
def financial_times(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.FINANCIAL_TIMES, api_key, max_articles, sort, reverse)
@staticmethod
def football_italia(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOOTBALL_ITALIA, api_key, max_articles, sort, reverse)
@staticmethod
def fortune(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.FORTUNE, api_key, max_articles, sort, reverse)
@staticmethod
def four_four_two(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOUR_FOUR_TWO, api_key, max_articles, sort, reverse)
@staticmethod
def fox_sports(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOX_SPORTS, api_key, max_articles, sort, reverse)
@staticmethod
def google_news(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.GOOGLE_NEWS, api_key, max_articles, sort, reverse)
@staticmethod
def hacker_news(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.HACKER_NEWS, api_key, max_articles, sort, reverse)
@staticmethod
def ign(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.IGN, api_key, max_articles, sort, reverse)
@staticmethod
def independent(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.INDEPENDENT, api_key, max_articles, sort, reverse)
@staticmethod
def mashable(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.MASHABLE, api_key, max_articles, sort, reverse)
@staticmethod
def metro(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.METRO, api_key, max_articles, sort, reverse)
@staticmethod
def mirror(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.MIRROR, api_key, max_articles, sort, reverse)
@staticmethod
def mtv_news(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.MTV_NEWS, api_key, max_articles, sort, reverse)
@staticmethod
def mtv_news_uk(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.MTV_NEWS_UK, api_key, max_articles, sort, reverse)
@staticmethod
def national_geographic(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.NATIONAL_GEOGRAPHIC, api_key, max_articles, sort, reverse)
@staticmethod
def new_scientist(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEW_SCIENTIST, api_key, max_articles, sort, reverse)
@staticmethod
def newsweek(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEWSWEEK, api_key, max_articles, sort, reverse)
@staticmethod
def new_york_magazine(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEW_YORK_MAGAZINE, api_key, max_articles, sort, reverse)
@staticmethod
def nfl_news(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.NFL_NEWS, api_key, max_articles, sort, reverse)
@staticmethod
def polygon(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.POLYGON, api_key, max_articles, sort, reverse)
@staticmethod
def recode(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.RECODE, api_key, max_articles, sort, reverse)
@staticmethod
def reddit(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.REDDIT_R_ALL, api_key, max_articles, sort, reverse)
@staticmethod
def reuters(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.REUTERS, api_key, max_articles, sort, reverse)
@staticmethod
def talksport(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.TALKSPORT, api_key, max_articles, sort, reverse)
@staticmethod
def techcrunch(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.TECHCRUNCH, api_key, max_articles, sort, reverse)
@staticmethod
def techradar(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.TECHRADAR, api_key, max_articles, sort, reverse)
@staticmethod
def the_economist(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_ECONOMIST, api_key, max_articles, sort, reverse)
@staticmethod
def the_guardian_au(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_GUARDIAN_AU, api_key, max_articles, sort, reverse)
@staticmethod
def the_guardian_uk(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_GUARDIAN_UK, api_key, max_articles, sort, reverse)
@staticmethod
def the_huffington_post(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_HUFFINGTON_POST, api_key, max_articles, sort, reverse)
@staticmethod
def the_new_york_times(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_NEW_YORK_TIMES, api_key, max_articles, sort, reverse)
@staticmethod
def the_next_web(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_NEXT_WEB, api_key, max_articles, sort, reverse)
@staticmethod
def the_sport_bible(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_SPORT_BIBLE, api_key, max_articles, sort, reverse)
@staticmethod
def the_telegraph(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_TELEGRAPH, api_key, max_articles, sort, reverse)
@staticmethod
def the_verge(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_VERGE, api_key, max_articles, sort, reverse)
@staticmethod
def the_wall_street_journal(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_WALL_STREET_JOURNAL, api_key, max_articles, sort, reverse)
@staticmethod
def the_washington_post(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_WASHINGTON_POST, api_key, max_articles, sort, reverse)
@staticmethod
def time(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.TIME, api_key, max_articles, sort, reverse)
@staticmethod
def usa_today(api_key, max_articles, sort, reverse):
return NewsAPI._get_data(NewsAPI.USA_TODAY, api_key, max_articles, sort, reverse)
@staticmethod
def business(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.bloomberg(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.business_insider(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.business_insider_uk(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.cnbc(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.financial_times(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.fortune(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.the_economist(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.the_wall_street_journal(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def entertainment(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.buzzfeed(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.daily_mail(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.entertainment_weekly(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.mashable(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def gaming(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.ign(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.polygon(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def music(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.mtv_news(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.mtv_news_uk(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def science_and_nature(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.national_geographic(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.new_scientist(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def sport(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.bbc_sport(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.espn(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.espn_cric_info(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.football_italia(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.four_four_two(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.fox_sports(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.talksport(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.the_sport_bible(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def technology(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.ars_technica(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.engadget(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.hacker_news(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.recode(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.techcrunch(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.techradar(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.the_verge(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def uk_news(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.bbc_news(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.uk_newspapers(api_key, max_articles, sort, reverse))
return articles
@staticmethod
def uk_newspapers(api_key, max_articles, sort, reverse):
articles = []
articles.extend(NewsAPI.the_guardian_uk(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.mirror(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.the_telegraph(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.daily_mail(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.financial_times(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.independent(api_key, max_articles, sort, reverse))
articles.extend(NewsAPI.metro(api_key, max_articles, sort, reverse))
return articles
def get_headlines(self, source, max_articles=0, sort=False, reverse=False):
if source in self.function_mapping:
function = self.function_mapping[source]
return function(self.api_key, max_articles, sort, reverse)
else:
logging.error("No source available for %s"%source)
return []
@staticmethod
def to_json(articles):
data = {}
data['articles'] = []
for article in articles:
data['articles'].append(article.to_json())
return data
@staticmethod
def json_to_file(filename, json_data):
with open(filename, 'w+') as json_file:
json.dump(json_data, json_file)
@staticmethod
def json_from_file(filename):
with open(filename, 'r+') as json_file:
return json.load(json_file)
@staticmethod
def to_program_y_text(articles, break_str=" <br /> "):
return break_str.join("%s - %s" % (article.title, article.description) for article in articles)
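# Hedged usage sketch (not part of programy): fetching a collection of
# headlines and flattening them for program-y, assuming `license_keys`
# already contains NEWSAPI_API_KEY as in the __main__ block below.
def _example_headlines_to_text(license_keys):
    news_api = NewsAPI(license_keys)
    articles = news_api.get_headlines(NewsAPI.TECHNOLOGY, max_articles=5, sort=True, reverse=True)
    return NewsAPI.to_program_y_text(articles)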
if __name__ == '__main__':
# Running these tools drops test files into the geocode test folder
app_license_keys = LicenseKeys()
app_license_keys.load_license_key_file(os.path.dirname(__file__) + '/../../../../bots/y-bot/config/license.keys')
news_api = NewsAPI(app_license_keys)
results = news_api.get_headlines(NewsAPI.BBC_NEWS)
json_data = NewsAPI.to_json(results)
NewsAPI.json_to_file('../../../test/utils/newsapi/newsapi.json', json_data)
# Running these tools drops test files into the geocode test folder
|
|
#!/usr/bin/env python3
__author__ = 'Petr Ankudinov'
from jinja2 import meta, FileSystemLoader
import jinja2.nodes
import os
import sys
import yaml
from modules.tools import merge_dict, build_dict
def build_dict_recursive(lst_or_tpl):
# Recursive function that builds a hierarchical dictionary from lists and sublists of (key_list, value) tuples.
if isinstance(lst_or_tpl, tuple):
if isinstance(lst_or_tpl[1], list):
value = list()
for e in lst_or_tpl[1]:
value.append(build_dict_recursive(e))
elif isinstance(lst_or_tpl[1], tuple):
value = build_dict_recursive(lst_or_tpl[1])
else:
value = lst_or_tpl[1]
result = build_dict(list(reversed(lst_or_tpl[0])), value)
elif isinstance(lst_or_tpl, list):
result = dict()
for e in lst_or_tpl:
result = merge_dict(result, build_dict_recursive(e))
else:
result = lst_or_tpl
return result
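# Hedged illustration (not part of the original script): the AST walkers below
# feed (key_path, value) tuples into build_dict_recursive(), which is expected
# to collapse them into one nested variable dictionary under the assumed
# semantics of modules.tools.build_dict/merge_dict. Sample data is made up.
def _example_build_dict_recursive():
    extracted = [(['hostname'], '{{ not defined }}'),
                 (['interface', 'mtu'], '{{ not defined }}')]
    return build_dict_recursive(extracted)   # expected: nested dict of template variables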
value_dict = {
# these values will be assigned to extracted variables
'not defined': '{{ not defined }}',
'error': 'Error!',
'list': '{{ more elements in the list }}',
}
class J2Meta:
def __init__(self, template_realpath):
self.env = jinja2.Environment(loader=FileSystemLoader(searchpath=os.path.dirname(template_realpath)))
self.parent_template = os.path.basename(template_realpath)
self.known_templates = self.get_known_templates(self.parent_template)
# INTERNAL methods
def get_known_templates(self, template_name):
# initialise known template list and append parent template name
known_template_list = set()
known_template_list.add(template_name)
# parse parent template
template_src = self.env.loader.get_source(self.env, template_name)[0]
parsed_template = self.env.parse(source=template_src)
# get referenced templates and walk over these templates recursively
referenced_template_list = meta.find_referenced_templates(parsed_template)
for child_template in referenced_template_list:
known_template_list.add(child_template)
known_template_list.update(self.get_known_templates(child_template))
# return parent and all child template names
return known_template_list
def j2_ast_walk_main(self, j2node):
# The script will start walking over Jinja2 AST here looking for Getattr, Assign, Name, For nodes.
result_list = list()
recursion_required_nodes = [jinja2.nodes.Template, jinja2.nodes.Output]
recursion_required = False
for node in recursion_required_nodes:
if isinstance(j2node, node):
recursion_required = True
if recursion_required:
for child_node in j2node.iter_child_nodes():
# Recursion to get more specific nodes
for e in self.j2_ast_walk_main(child_node):
result_list.append(e)
else:
# Node specific walk
if isinstance(j2node, jinja2.nodes.For):
for e in self.j2_ast_walk_for(j2node):
result_list.append(e)
if isinstance(j2node, jinja2.nodes.If):
for e in self.j2_ast_walk_if(j2node):
result_list.append(e)
if isinstance(j2node, jinja2.nodes.Getattr):
for e in self.j2_ast_walk_getattr(j2node):
result_list.append(e)
if isinstance(j2node, jinja2.nodes.Assign):
for e in self.j2_ast_walk_assign(j2node):
result_list.append(e)
if isinstance(j2node, jinja2.nodes.Name):
for e in self.j2_ast_walk_name(j2node):
result_list.append(e)
# Ignore following nodes
ignored_node_list = [
jinja2.nodes.TemplateData,
jinja2.nodes.Literal,
jinja2.nodes.Expr,
jinja2.nodes.Const,
jinja2.nodes.Include,
]
for ignored_node in ignored_node_list:
if isinstance(j2node, ignored_node):
pass # do nothing
# Generate alert for future debugging
alert_nodes_list = [
jinja2.nodes.Macro,
jinja2.nodes.CallBlock,
jinja2.nodes.FilterBlock,
jinja2.nodes.With,
jinja2.nodes.Block,
jinja2.nodes.Import,
jinja2.nodes.FromImport,
jinja2.nodes.ExprStmt,
jinja2.nodes.AssignBlock,
jinja2.nodes.BinExpr,
jinja2.nodes.UnaryExpr,
jinja2.nodes.Tuple,
jinja2.nodes.List,
jinja2.nodes.Dict,
jinja2.nodes.Pair,
jinja2.nodes.Keyword,
jinja2.nodes.CondExpr,
jinja2.nodes.Filter,
jinja2.nodes.Test,
jinja2.nodes.Call,
jinja2.nodes.Getitem,
jinja2.nodes.Slice,
jinja2.nodes.Concat,
jinja2.nodes.Compare,
jinja2.nodes.Operand,
]
for i, ignored_node in enumerate(alert_nodes_list):
if isinstance(j2node, ignored_node):
print("Ignoring %s!" % alert_nodes_list[i], file=sys.stderr)
print(j2node, file=sys.stderr)
return result_list
@staticmethod
def j2_ast_walk_name(j2node):
key_list = [j2node.name]
value = False
if j2node.ctx == 'load':
value = value_dict['not defined']
else: # ctx == 'store'
pass # ctx should be 'load' for Name node
if not value:
value = value_dict['error']
key_list = list(key_list)
return [(key_list, value)] # return a list with a single tuple
def j2_ast_walk_getattr(self, j2node):
result_list = list()
for child_node in j2node.iter_child_nodes():
for e in self.j2_ast_walk_main(child_node):
result_list.append(e)
for tpl in result_list:
tpl[0].append(j2node.attr) # add parent key to each tuple
return result_list
def j2_ast_walk_assign(self, j2node):
key_list = list()
value = False
for child in j2node.iter_child_nodes():
if isinstance(child, jinja2.nodes.Name):
if child.ctx == 'store': # 'store' should be the only context for Assign node
key_list.append(child.name)
else:
value = child.name
if isinstance(child, jinja2.nodes.Pair):
if isinstance(child.value, jinja2.nodes.Const):
if not value:
value = child.value.value
if isinstance(child.value, jinja2.nodes.Name):
if not value:
value = child.value.name
if isinstance(child.value, jinja2.nodes.Dict):
for temp_list, value in self.j2_ast_walk_assign(child.value):
key_list = key_list + temp_list
key_list.append(child.key.value)
if isinstance(child, jinja2.nodes.Dict):
temp_list, value = self.j2_ast_walk_assign(child)
key_list = key_list + temp_list
key_list = list(reversed(key_list))
return [(key_list, value)]
def j2_ast_walk_for(self, j2node):
result_list = list()
iter_list = self.j2_ast_walk_main(j2node.iter)
target_key_list = self.j2_ast_walk_main(j2node.target) # value will be ignored
target_key_length = len(target_key_list)
target_child_key_list = list()
for node in j2node.body:
for e in self.j2_ast_walk_main(node):
for tk in target_key_list:
if e[0][:target_key_length] == tk[0]:
if e[0][target_key_length:]: # verify if there are any other key apart from target
target_child_key_list.append((e[0][target_key_length:], e[1]))
else:
result_list.append(e)
for ik in iter_list:
if target_child_key_list:
result_list.append((ik[0], [target_child_key_list, value_dict['list']]))
else:
result_list.append((ik[0], [ik[1], value_dict['list']]))
return result_list
def j2_ast_walk_if(self, j2node):
result_list = list()
if isinstance(j2node.test, jinja2.nodes.Compare):
for key_list, value in self.j2_ast_walk_getattr(j2node.test.expr):
result_list.append((key_list, value))
for node in j2node.body:
for key_list, value in self.j2_ast_walk_main(node):
result_list.append((key_list, value))
for node in j2node.else_:
for key_list, value in self.j2_ast_walk_main(node):
result_list.append((key_list, value))
return result_list
# EXTERNAL methods
def get_template_list(self):
return self.known_templates
def parse(self, variables):
j2_template = self.env.get_template(self.parent_template) # get parent template
config = j2_template.render(variables)
return config
def get_variables(self):
result_list = list()
for template in self.known_templates:
template_src = self.env.loader.get_source(self.env, template)[0]
parsed_template = self.env.parse(source=template_src)
for e in self.j2_ast_walk_main(parsed_template):
result_list.append(e)
var_dict = build_dict_recursive(result_list)
return var_dict
if __name__ == '__main__':
# Extract variables from the specified template and display as YAML
template_name = sys.argv[1]
template_meta = J2Meta(template_name)
print(
yaml.dump(template_meta.get_variables(), default_flow_style=False)
)
|
|
"""Form implementation"""
from collections import OrderedDict
from webob.multidict import MultiDict
from pyramid.compat import string_types
from pyramid.decorator import reify
from pyramid.renderers import NullRendererHelper
from pyramid.interfaces import IResponse
from pyramid.httpexceptions import HTTPException, HTTPForbidden
from pyramid.config.views import DefaultViewMapper
from djed.renderer import render, template_filter
from djed.message import add_message
from .field import Field
from .fieldset import Fieldset
from .button import Buttons, Actions
from .interfaces import Invalid, HTTPResponseIsReady
@template_filter('form:error')
def form_error_message(context, request):
""" form error renderer """
errors = [err for err in context
if (isinstance(err, str) or
(isinstance(err, Invalid) and err.field is None))]
return {'errors': errors}
class FormWidgets(OrderedDict):
""" Form widgets manager.
Widget is bound to content field. """
prefix = 'widgets.'
fieldsets = ()
def __init__(self, fields, form, request):
self.form_fields = fields
self.form = form
self.request = request
super(FormWidgets, self).__init__()
def fields(self):
return self.fieldset.fields()
def update(self):
form = self.form
params = form.form_params()
content = form.form_content()
prefix = '%s%s' % (form.prefix, self.prefix)
fieldsets = self.fieldsets = []
self.fieldset = self.form_fields.bind(
self.request, content, params, prefix, form)
# Walk through each field, making a widget out of it.
for fieldset in self.fieldset.fieldsets():
widgets = []
for widget in fieldset.fields():
widget.update()
widgets.append(widget)
self[widget.name] = widget
fieldsets.append(
{'fieldset': fieldset,
'name': fieldset.name,
'title': fieldset.title,
'widgets': widgets})
def extract(self):
data, errors = self.fieldset.extract()
# additional form validation
self.form.validate_form(data, errors)
# convert strings
errors = [Invalid(err) if isinstance(err, string_types) else err
for err in errors]
# set errors to fields
for err in errors:
if isinstance(err.field, Field) and err.field.error is None:
err.field.error = err
return data, errors
class FormViewMapper(DefaultViewMapper):
def __init__(self, **kw):
super(FormViewMapper, self).__init__(**kw)
if kw.get('renderer'):
self.map_class_native = self.map_class_native_update
def map_class_native_update(self, form_view):
def _class_view(context, request, _view=form_view):
inst = _view(context, request)
request.__original_view__ = inst
try:
result = inst.update_form()
if result is None:
result = {}
except HTTPResponseIsReady as exc:
result = exc.args[0]
except HTTPException as exc:
result = exc
request.__view__ = inst
return result
return _class_view
class Form(object):
""" A form
``id``: Form id
``name``: Form name
    ``prefix``: Form prefix, used when generating html element `id` values.
``fields``: Form fields :py:class:`djed.form.Fieldset`
``buttons``: Form buttons :py:class:`djed.form.Buttons`
``actions``: Instance of :py:class:`djed.form.Actions` class
``widgets``: Instance of :py:class:`FormWidgets` class
``content``: Form content, it should be `None` or dictionary with
data for fields.
``params``: Form request parameters
``action``: Form action, by default ``request.url``
``method``: HTML Form method (`post`, `get`)
``csrf``: Enable/disable form csrf protection
``csrf_name``: Form csrf field name
``csrf_token``: Form csrf token value
"""
prefix = 'form.'
actions = None
widgets = None
buttons = None
fields = Fieldset()
content = None
method = 'post'
enctype = 'multipart/form-data'
accept = None
accept_charset = 'utf-8'
params = None
context = None
klass = 'form-horizontal'
csrf = False
csrf_name = 'csrf-token'
csrf_token = ''
tmpl_view = 'form:form'
tmpl_actions = 'form:form-actions'
tmpl_widget = 'form:widget'
__name__ = ''
__parent__ = None
__view_mapper__ = FormViewMapper
def __init__(self, context, request, **kw):
self.__dict__.update(kw)
self.context = context
self.request = request
self.__parent__ = context
if self.buttons is None:
self.buttons = Buttons()
# convert fields to Fieldset
if not isinstance(self.fields, Fieldset):
self.fields = Fieldset(*self.fields)
# set tmpl_widget
for fieldset in self.fields.fieldsets():
for field in fieldset.fields():
if field.cls.tmpl_widget is None:
field.cls.tmpl_widget = self.tmpl_widget
@reify
def id(self):
return self.name.replace('.', '-')
@reify
def name(self):
return self.prefix.strip('.')
@reify
def action(self):
return self.request.url
@reify
def csrf_token(self):
return self.request.session.get_csrf_token()
def form_content(self):
""" Return form content.
By default it returns ``Form.content`` attribute. """
return self.content
def form_params(self):
""" get form request params """
if self.params is not None:
if not isinstance(self.params, MultiDict):
return MultiDict(self.params)
return self.params
if self.method == 'post':
return self.request.POST
elif self.method == 'get':
return self.request.GET
else:
return self.params
def update_widgets(self):
""" prepare form widgets """
self.widgets = FormWidgets(self.fields, self, self.request)
self.widgets.update()
def update_actions(self):
""" Prepare form actions, this method should be called directly.
``Form.update`` calls this method during initialization."""
self.actions = Actions(self, self.request)
self.actions.update()
def update_form(self, data=None):
""" update form """
if not self.content and data:
self.content = data
self.update_widgets()
self.update_actions()
ac_result = self.actions.execute()
if IResponse.providedBy(ac_result):
raise HTTPResponseIsReady(ac_result)
result = self.update()
if IResponse.providedBy(result):
raise HTTPResponseIsReady(result)
if result is None:
result = {}
if ac_result is not None:
result.update(ac_result)
return result
def update(self):
""" Update form """
return {}
def render(self):
""" render form """
return render(self.request, self.tmpl_view, self,
actions = self.actions,
widgets = self.widgets)
def validate(self, data, errors):
""" Custom form validation """
def validate_form(self, data, errors):
""" Form validation """
self.validate_csrf_token()
try:
self.validate(data, errors)
except Invalid as err:
errors.append(err)
def validate_csrf_token(self):
""" csrf token validation """
if self.csrf:
token = self.form_params().get(self.csrf_name, None)
if token is not None:
if self.csrf_token == token:
return
raise HTTPForbidden("Form authenticator is not found.")
def extract(self):
""" extract form values """
return self.widgets.extract()
def add_error_message(self, msg):
""" add form error message """
add_message(self.request, msg, 'form:error')
def __call__(self):
""" update form and render form to response """
try:
result = self.update_form()
except HTTPResponseIsReady as result:
return result.args[0]
except HTTPException as result:
return result
response = self.request.registry.queryAdapterOrSelf(result, IResponse)
if response is not None:
return response
body = self.render()
response = self.request.response
if isinstance(body, bytes):
response.body = body
else:
response.text = body
return response
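# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a Form subclass
# might plug into the machinery defined above.  ``MyForm`` and the ``title``
# field name are hypothetical; ``Form``, ``Fieldset`` and ``Invalid`` are the
# objects used elsewhere in this module.
def _example_form_subclass():
    """Return a hypothetical Form subclass demonstrating custom validation."""

    class MyForm(Form):
        csrf = True            # enable csrf protection (see validate_csrf_token)
        fields = Fieldset()    # a real form would declare its fields here

        def form_content(self):
            # Pre-populate the form instead of relying on Form.content.
            return {'title': 'example'}

        def validate(self, data, errors):
            # Form-wide validation; errors are collected by validate_form().
            if not data.get('title'):
                errors.append(Invalid('title is required'))

    return MyForm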
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private utilities for managing multiple TensorBoard processes."""
import base64
import collections
import datetime
import errno
import json
import os
import subprocess
import tempfile
import time
from tensorboard import version
from tensorboard.util import tb_logging
# Type descriptors for `TensorBoardInfo` fields.
#
# We represent timestamps as int-seconds-since-epoch rather than
# datetime objects to work around a bug in Python on Windows. See:
# https://github.com/tensorflow/tensorboard/issues/2017.
_FieldType = collections.namedtuple(
"_FieldType",
(
"serialized_type",
"runtime_type",
"serialize",
"deserialize",
),
)
_type_int = _FieldType(
serialized_type=int,
runtime_type=int,
serialize=lambda n: n,
deserialize=lambda n: n,
)
_type_str = _FieldType(
serialized_type=str, # `json.loads` always gives Unicode
runtime_type=str,
serialize=str,
deserialize=str,
)
# Information about a running TensorBoard instance.
_TENSORBOARD_INFO_FIELDS = collections.OrderedDict(
(
("version", _type_str),
("start_time", _type_int), # seconds since epoch
("pid", _type_int),
("port", _type_int),
("path_prefix", _type_str), # may be empty
("logdir", _type_str), # may be empty
("db", _type_str), # may be empty
("cache_key", _type_str), # opaque, as given by `cache_key` below
)
)
TensorBoardInfo = collections.namedtuple(
"TensorBoardInfo",
_TENSORBOARD_INFO_FIELDS,
)
def data_source_from_info(info):
"""Format the data location for the given TensorBoardInfo.
Args:
info: A TensorBoardInfo value.
Returns:
A human-readable string describing the logdir or database connection
used by the server: e.g., "logdir /tmp/logs".
"""
if info.db:
return "db %s" % info.db
else:
return "logdir %s" % info.logdir
def _info_to_string(info):
"""Convert a `TensorBoardInfo` to string form to be stored on disk.
The format returned by this function is opaque and should only be
interpreted by `_info_from_string`.
Args:
info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type.
Returns:
A string representation of the provided `TensorBoardInfo`.
"""
for key in _TENSORBOARD_INFO_FIELDS:
field_type = _TENSORBOARD_INFO_FIELDS[key]
if not isinstance(getattr(info, key), field_type.runtime_type):
raise ValueError(
"expected %r of type %s, but found: %r"
% (key, field_type.runtime_type, getattr(info, key))
)
if info.version != version.VERSION:
raise ValueError(
"expected 'version' to be %r, but found: %r"
% (version.VERSION, info.version)
)
json_value = {
k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k))
for k in _TENSORBOARD_INFO_FIELDS
}
return json.dumps(json_value, sort_keys=True, indent=4)
def _info_from_string(info_string):
"""Parse a `TensorBoardInfo` object from its string representation.
Args:
info_string: A string representation of a `TensorBoardInfo`, as
produced by a previous call to `_info_to_string`.
Returns:
A `TensorBoardInfo` value.
Raises:
ValueError: If the provided string is not valid JSON, or if it is
missing any required fields, or if any field is of incorrect type.
"""
try:
json_value = json.loads(info_string)
except ValueError:
raise ValueError("invalid JSON: %r" % (info_string,))
if not isinstance(json_value, dict):
raise ValueError("not a JSON object: %r" % (json_value,))
expected_keys = frozenset(_TENSORBOARD_INFO_FIELDS)
actual_keys = frozenset(json_value)
missing_keys = expected_keys - actual_keys
if missing_keys:
raise ValueError(
"TensorBoardInfo missing keys: %r" % (sorted(missing_keys),)
)
# For forward compatibility, silently ignore unknown keys.
# Validate and deserialize fields.
fields = {}
for key in _TENSORBOARD_INFO_FIELDS:
field_type = _TENSORBOARD_INFO_FIELDS[key]
if not isinstance(json_value[key], field_type.serialized_type):
raise ValueError(
"expected %r of type %s, but found: %r"
% (key, field_type.serialized_type, json_value[key])
)
fields[key] = field_type.deserialize(json_value[key])
return TensorBoardInfo(**fields)
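# Illustrative sketch (not part of the original module): `_info_to_string` and
# `_info_from_string` round-trip a well-formed `TensorBoardInfo`.  The field
# values below are made up; `version` must match `version.VERSION`.
def _example_info_roundtrip():
    """Serialize and re-parse a TensorBoardInfo; return the parsed copy."""
    info = TensorBoardInfo(
        version=version.VERSION,
        start_time=1234567890,
        pid=6006,
        port=6006,
        path_prefix="",
        logdir="/tmp/logs",
        db="",
        cache_key="opaque-key",
    )
    parsed = _info_from_string(_info_to_string(info))
    assert parsed == info
    return parsed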
def cache_key(working_directory, arguments, configure_kwargs):
"""Compute a `TensorBoardInfo.cache_key` field.
The format returned by this function is opaque. Clients may only
inspect it by comparing it for equality with other results from this
function.
Args:
working_directory: The directory from which TensorBoard was launched
and relative to which paths like `--logdir` and `--db` are
resolved.
arguments: The command-line args to TensorBoard, as `sys.argv[1:]`.
Should be a list (or tuple), not an unparsed string. If you have a
raw shell command, use `shlex.split` before passing it to this
function.
configure_kwargs: A dictionary of additional argument values to
override the textual `arguments`, with the same semantics as in
`tensorboard.program.TensorBoard.configure`. May be an empty
dictionary.
Returns:
A string such that if two (prospective or actual) TensorBoard
invocations have the same cache key then it is safe to use one in
place of the other. The converse is not guaranteed: it is often safe
to change the order of TensorBoard arguments, or to explicitly set
them to their default values, or to move them between `arguments`
and `configure_kwargs`, but such invocations may yield distinct
cache keys.
"""
if not isinstance(arguments, (list, tuple)):
raise TypeError(
"'arguments' should be a list of arguments, but found: %r "
"(use `shlex.split` if given a string)" % (arguments,)
)
datum = {
"working_directory": working_directory,
"arguments": arguments,
"configure_kwargs": configure_kwargs,
}
raw = base64.b64encode(
json.dumps(datum, sort_keys=True, separators=(",", ":")).encode("utf-8")
)
# `raw` is of type `bytes`, even though it only contains ASCII
# characters; we want it to be `str` in both Python 2 and 3.
return str(raw.decode("ascii"))
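# Illustrative sketch (not part of the original module): cache keys are
# opaque, but identical inputs always yield identical keys, which is what
# `start` relies on when deciding whether to reuse a running instance.  The
# working directory and logdir below are made up.
def _example_cache_key_equality():
    key_a = cache_key(
        working_directory="/home/user",
        arguments=["--logdir", "/tmp/logs"],
        configure_kwargs={},
    )
    key_b = cache_key(
        working_directory="/home/user",
        arguments=["--logdir", "/tmp/logs"],
        configure_kwargs={},
    )
    assert key_a == key_b  # identical invocations are interchangeable
    return key_a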
def _get_info_dir():
"""Get path to directory in which to store info files.
The directory returned by this function is "owned" by this module. If
the contents of the directory are modified other than via the public
functions of this module, subsequent behavior is undefined.
The directory will be created if it does not exist.
"""
path = os.path.join(tempfile.gettempdir(), ".tensorboard-info")
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
else:
os.chmod(path, 0o777)
return path
def _get_info_file_path():
"""Get path to info file for the current process.
As with `_get_info_dir`, the info directory will be created if it
does not exist.
"""
return os.path.join(_get_info_dir(), "pid-%d.info" % os.getpid())
def write_info_file(tensorboard_info):
"""Write TensorBoardInfo to the current process's info file.
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type.
"""
payload = "%s\n" % _info_to_string(tensorboard_info)
with open(_get_info_file_path(), "w") as outfile:
outfile.write(payload)
def remove_info_file():
"""Remove the current process's TensorBoardInfo file, if it exists.
If the file does not exist, no action is taken and no error is
raised.
"""
try:
os.unlink(_get_info_file_path())
except OSError as e:
if e.errno == errno.ENOENT:
# The user may have wiped their temporary directory or something.
# Not a problem: we're already in the state that we want to be in.
pass
else:
raise
def get_all():
"""Return TensorBoardInfo values for running TensorBoard processes.
This function may not provide a perfect snapshot of the set of running
processes. Its result set may be incomplete if the user has cleaned
their /tmp/ directory while TensorBoard processes are running. It may
contain extraneous entries if TensorBoard processes exited uncleanly
(e.g., with SIGKILL or SIGQUIT).
Entries in the info directory that do not represent valid
`TensorBoardInfo` values will be silently ignored.
Returns:
A fresh list of `TensorBoardInfo` objects.
"""
info_dir = _get_info_dir()
results = []
for filename in os.listdir(info_dir):
filepath = os.path.join(info_dir, filename)
try:
with open(filepath) as infile:
contents = infile.read()
except IOError as e:
if e.errno == errno.EACCES:
# May have been written by this module in a process whose
# `umask` includes some bits of 0o444.
continue
else:
raise
try:
info = _info_from_string(contents)
except ValueError:
# Ignore unrecognized files, logging at debug only.
tb_logging.get_logger().debug(
"invalid info file: %r",
filepath,
exc_info=True,
)
else:
results.append(info)
return results
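# Illustrative sketch (not part of the original module): enumerate running
# TensorBoard instances and describe where each one reads its data from.
def _example_list_running_instances():
    """Return (port, data source) pairs for all discoverable instances."""
    return [(info.port, data_source_from_info(info)) for info in get_all()]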
# The following five types enumerate the possible return values of the
# `start` function.
# Indicates that a call to `start` was compatible with an existing
# TensorBoard process, which can be reused according to the provided
# info.
StartReused = collections.namedtuple("StartReused", ("info",))
# Indicates that a call to `start` successfully launched a new
# TensorBoard process, which is available with the provided info.
StartLaunched = collections.namedtuple("StartLaunched", ("info",))
# Indicates that a call to `start` tried to launch a new TensorBoard
# instance, but the subprocess exited with the given exit code and
# output streams. (If the contents of the output streams are no longer
# available---e.g., because the user has emptied /tmp/---then the
# corresponding values will be `None`.)
StartFailed = collections.namedtuple(
"StartFailed",
(
"exit_code", # int, as `Popen.returncode` (negative for signal)
"stdout", # str, or `None` if the stream could not be read
"stderr", # str, or `None` if the stream could not be read
),
)
# Indicates that a call to `start` failed to invoke the subprocess.
#
# If the TensorBoard executable was chosen via the `TENSORBOARD_BINARY`
# environment variable, then the `explicit_binary` field contains the
# path to that binary; otherwise, the field is `None`.
StartExecFailed = collections.namedtuple(
"StartExecFailed",
(
"os_error", # `OSError` due to `Popen` invocation
"explicit_binary", # `str` or `None`; see type-level comment
),
)
# Indicates that a call to `start` launched a TensorBoard process, but
# that process neither exited nor wrote its info file within the allowed
# timeout period. The process may still be running under the included
# PID.
StartTimedOut = collections.namedtuple("StartTimedOut", ("pid",))
def start(arguments, timeout=datetime.timedelta(seconds=60)):
"""Start a new TensorBoard instance, or reuse a compatible one.
If the cache key determined by the provided arguments and the current
working directory (see `cache_key`) matches the cache key of a running
TensorBoard process (see `get_all`), that process will be reused.
Otherwise, a new TensorBoard process will be spawned with the provided
arguments, using the `tensorboard` binary from the system path.
Args:
arguments: List of strings to be passed as arguments to
`tensorboard`. (If you have a raw command-line string, see
`shlex.split`.)
timeout: `datetime.timedelta` object describing how long to wait for
the subprocess to initialize a TensorBoard server and write its
`TensorBoardInfo` file. If the info file is not written within
this time period, `start` will assume that the subprocess is stuck
in a bad state, and will give up on waiting for it and return a
`StartTimedOut` result. Note that in such a case the subprocess
will not be killed. Default value is 60 seconds.
Returns:
A `StartReused`, `StartLaunched`, `StartFailed`, or `StartTimedOut`
object.
"""
this_cache_key = cache_key(
working_directory=os.getcwd(),
arguments=arguments,
configure_kwargs={},
)
match = _find_matching_instance(this_cache_key)
if match:
return StartReused(info=match)
(stdout_fd, stdout_path) = tempfile.mkstemp(prefix=".tensorboard-stdout-")
(stderr_fd, stderr_path) = tempfile.mkstemp(prefix=".tensorboard-stderr-")
start_time_seconds = time.time()
explicit_tb = os.environ.get("TENSORBOARD_BINARY", None)
try:
p = subprocess.Popen(
["tensorboard" if explicit_tb is None else explicit_tb] + arguments,
stdout=stdout_fd,
stderr=stderr_fd,
)
except OSError as e:
return StartExecFailed(os_error=e, explicit_binary=explicit_tb)
finally:
os.close(stdout_fd)
os.close(stderr_fd)
poll_interval_seconds = 0.5
end_time_seconds = start_time_seconds + timeout.total_seconds()
while time.time() < end_time_seconds:
time.sleep(poll_interval_seconds)
subprocess_result = p.poll()
if subprocess_result is not None:
return StartFailed(
exit_code=subprocess_result,
stdout=_maybe_read_file(stdout_path),
stderr=_maybe_read_file(stderr_path),
)
info = _find_matching_instance(this_cache_key)
if info:
# Don't check that `info.pid == p.pid`, since on Windows that may
# not be the case: see #4300.
return StartLaunched(info=info)
else:
return StartTimedOut(pid=p.pid)
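# Illustrative sketch (not part of the original module): how a caller might
# dispatch on the possible return types of `start`.  The logdir is made up.
def _example_start_and_report():
    """Start (or reuse) a TensorBoard instance and return a status string."""
    result = start(["--logdir", "/tmp/logs"])
    if isinstance(result, StartReused):
        return "reusing TensorBoard on port %d" % result.info.port
    elif isinstance(result, StartLaunched):
        return "launched TensorBoard on port %d" % result.info.port
    elif isinstance(result, StartFailed):
        return "TensorBoard exited with code %d" % result.exit_code
    elif isinstance(result, StartExecFailed):
        return "could not execute TensorBoard: %s" % (result.os_error,)
    else:  # StartTimedOut
        return "TensorBoard (pid %d) did not respond in time" % result.pid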
def _find_matching_instance(cache_key):
"""Find a running TensorBoard instance compatible with the cache key.
Returns:
A `TensorBoardInfo` object, or `None` if none matches the cache key.
"""
infos = get_all()
candidates = [info for info in infos if info.cache_key == cache_key]
for candidate in sorted(candidates, key=lambda x: x.port):
# TODO(@wchargin): Check here that the provided port is still live.
return candidate
return None
def _maybe_read_file(filename):
"""Read the given file, if it exists.
Args:
filename: A path to a file.
Returns:
A string containing the file contents, or `None` if the file does
not exist.
"""
try:
with open(filename) as infile:
return infile.read()
except IOError as e:
if e.errno == errno.ENOENT:
            return None
        raise
|
|
from grid.cell import Cell
from grid.vertex import Vertex
from grid.triangle import Triangle
import numpy
import matplotlib.pyplot as plt
class Mesh(object):
'''
Description: (Quad) Mesh object
Attributes:
bounding_box: [xmin, xmax, ymin, ymax]
children: Cell, list of cells contained in mesh
vertex_list: Vertex, list of vertices (run number_vertices)
connectivity: int, numpy array - element connectivity matrix (run build_connectivity)
        max_depth: int, maximum number of times each of the mesh's cells can be refined
balanced: bool, true if mesh is balanced.
Methods:
'''
def __init__(self, box=[0.,1.,0.,1.], nx=2, ny=2):
'''
Description: Constructor, initialize rectangular grid
Inputs:
box: double, boundary vertices of rectangular grid, box = [x_min, x_max, y_min, y_max]
nx: int, number of cells in x-direction
ny: int, number of cells in y-direction
type: 'MESH'
'''
self.bounding_box = box
self.type = 'MESH'
self.children_array_size = (nx,ny)
#
# Define cells in mesh
#
xmin, xmax, ymin, ymax = box
x = numpy.linspace(xmin, xmax, nx+1)
y = numpy.linspace(ymin, ymax, ny+1)
mesh_cells = {}
for i in range(nx):
for j in range(ny):
if i == 0 and j == 0:
v_sw = Vertex((x[i] ,y[j] ))
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
elif i > 0 and j == 0:
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_sw = mesh_cells[i-1,j].vertices['SE']
v_nw = mesh_cells[i-1,j].vertices['NE']
elif i == 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
elif i > 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = mesh_cells[i-1,j].vertices['NE']
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
cell_vertices = {'SW': v_sw, 'SE': v_se, 'NE': v_ne, 'NW': v_nw}
cell_address = [i,j]
mesh_cells[i,j] = Cell(cell_vertices, self, cell_address)
self.children = mesh_cells
self.vertex_list = []
self.connectivity = None
self.max_depth = 0
self.__num_vertices = 0
self.__num_cells = 0
self.__balanced = False
self.__triangles = []
def leaves(self):
"""
Description: Returns a list of all leaf sub-cells of the mesh
Input:
group: string, optional sorting criterium (None, or 'depth')
Output:
leaves: list of LEAF cells
"""
#
# All leaves go in a long list
#
leaves = []
for child in self.children.itervalues():
leaves.extend(child.find_leaves())
self.__num_cells = len(leaves)
return leaves
def triangles(self):
"""
Returns a list of triangles
"""
if len(self.__triangles) == 0:
#
# Mesh has not been triangulated yet
#
self.triangulate()
return self.__triangles
else:
#
# Mesh triangulated
#
return self.__triangles
def vertices(self):
"""
Returns a list of vertices.
POSSIBLE BUG: if vertex has been marked outside of
this function, it will not show up in the list.
"""
n_vertices = -1
vertices = []
for leaf in self.leaves():
for v in leaf.vertices.itervalues():
if not v.is_marked():
n_vertices += 1
vertices.append(v)
v.set_node_number(n_vertices)
#
# Mark vertices in the list
#
v.mark()
        self.__num_vertices = n_vertices + 1
        #
        # Unmark all vertices again
        #
        for v in vertices:
            v.unmark()
        return vertices
def cells_at_depth(self, depth):
"""
Return all cells at a given depth > 0
"""
cells = []
for child in self.children.itervalues():
cells.extend(child.cells_at_depth(depth))
return cells
def has_children(self):
"""
Determine whether the mesh has children
"""
return any(child != None for child in self.children.itervalues())
    def get_max_depth(self):
        """
        Determine the maximum depth of the mesh
        """
        return self.max_depth
def unmark_all(self):
"""
Unmark all cells in mesh
"""
if self.has_children():
for child in self.children.itervalues():
child.unmark_all()
def refine(self):
"""
Refine mesh by splitting marked cells.
"""
leaves = self.leaves()
for leaf in leaves:
if leaf.flag:
leaf.split()
leaf.unmark()
self.__balanced = False
def coarsen(self):
"""
Coarsen mesh by collapsing marked cells
"""
leaves = self.leaves()
for leaf in leaves:
parent = leaf.parent
if parent.flag:
parent.children.clear()
self.remove_supports()
self.__balanced = False
def balance_tree(self):
"""
Ensure the 2:1 rule holds
"""
leaves = self.leaves()
leaf_dict = {'N': ['SE', 'SW'], 'S': ['NE', 'NW'],
'E': ['NW', 'SW'], 'W': ['NE', 'SE']}
while len(leaves) > 0:
leaf = leaves.pop()
flag = False
#
# Check if leaf needs to be split
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb == None:
pass
elif nb.type == 'LEAF':
pass
else:
for pos in leaf_dict[direction]:
#
                        # If neighbor's children nearest to you aren't LEAVES,
# then split and add children to list of leaves!
#
if nb.children[pos].type != 'LEAF':
leaf.mark()
leaf.split()
for child in leaf.children.itervalues():
child.mark_support_cell()
leaves.append(child)
#
# Check if there are any neighbors that should
# now also be split.
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb != None and nb.depth < leaf.depth:
leaves.append(nb)
flag = True
break
if flag:
break
self.__balanced = True
def remove_supports(self):
"""
Remove the supporting cells
"""
leaves = self.leaves()
while len(leaves) > 0:
leaf = leaves.pop()
if leaf.support_cell:
#
                # Check whether it's safe to delete the support cell
#
safe_to_coarsen = True
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb.has_children():
safe_to_coarsen = False
break
if safe_to_coarsen:
parent = leaf.parent
for child in parent.children.itervalues():
#
# Delete cells individually
#
del child
parent.children.clear()
leaves.append(parent)
self.__balanced = False
def triangulate(self):
"""
Generate triangulation of mesh:
balance if necessary
populate cells with triangles
generate connectivity matrix.
#TODO: unfinished
"""
triangles = []
if not self.__balanced:
#
# Balance mesh first
#
self.balance_tree()
for leaf in self.leaves():
v = leaf.vertices
#
# Determine whether Steiner Point is necessary
#
if any([v.has_key(direction) for direction in ['N','S','E','W']]):
#
# Add Steiner vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)))
leaf.vertices['M'] = vm
sub_edge_dict = {'S': ['SW','S','SE'], \
'E': ['NE','E','SE'], \
'N': ['NE','N','NW'], \
'W': ['NW','W','SW']}
for direction in ['S','E','N','W']:
se = sub_edge_dict[direction]
if v.has_key(direction):
#
# Midpoint on this edge
#
tri = [Triangle([v[se[0]],v[se[1]],vm],parent_cell=leaf),
Triangle([v[se[1]],v[se[2]],vm],parent_cell=leaf)]
else:
#
# No midpoint
#
tri = [Triangle([v[se[0]],v[se[2]],vm],parent_cell=leaf)]
triangles.extend(tri)
else:
#
# No Steiner vertex - simple triangulation
#
tri = [Triangle([v['SW'],v['SE'],v['NE']], parent_cell=leaf), \
Triangle([v['NE'],v['NW'],v['SW']], parent_cell=leaf)]
triangles.extend(tri)
self.__triangles = triangles
def build_connectivity(self):
"""
Returns the connectivity matrix for the tree
"""
# TODO: FIX build_connectivity
econn = []
num_vertices = len(self.vertex_list)
#
# Balance tree first
#
#self.balance_tree()
for leaf in self.leaves():
add_steiner_pt = False
#
# Get global indices for each corner vertex
#
gi = {}
for pos in ['NW', 'SW', 'NE', 'SE']:
gi[pos] = leaf.vertices[pos].node_number
edges = {'S': [[gi['SW'], gi['SE']]], 'N': [[gi['NE'], gi['NW']]],
'W': [[gi['NW'], gi['SW']]], 'E': [[gi['SE'], gi['NE']]] }
opposite_direction = {'N': 'S', 'S': 'N', 'W': 'E', 'E': 'W'}
for direction in ['S', 'N', 'E', 'W']:
neighbor = leaf.find_neighbor(direction)
if neighbor != None and neighbor.type != 'LEAF':
# If neighbor has children, then add the midpoint to
# your list of vertices, update the list of edges and
# remember to add the Steiner point later on.
#
od = opposite_direction[direction]
leaf.vertices[direction] = neighbor.vertices[od]
gi[direction] = leaf.vertices[direction].node_number
add_steiner_pt = True
edges[direction] = [[edges[direction][0][0], gi[direction]],
[gi[direction], edges[direction][0][1]]]
#
# Add the Triangles to connectivity
#
if not add_steiner_pt:
#
# Simple Triangulation
#
econn.extend([[gi['SW'], gi['SE'], gi['NE']],
[gi['NE'], gi['NW'], gi['SW']]] )
elif not leaf.vertices.has_key('M') or leaf.vertices['M'] == None:
#
# Add Steiner Vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)), node_number=num_vertices)
leaf.vertices['M'] = vm
gi['M'] = vm.node_number
self.vertex_list.append(vm)
num_vertices += 1
for direction in ['N', 'S', 'E', 'W']:
for sub_edge in edges[direction]:
econn.append([sub_edge[0], sub_edge[1], gi['M']])
return econn
def plot_quadmesh(self, ax, name=None, show=True, set_axis=True):
'''
Plot the current quadmesh
'''
if self.has_children():
if set_axis:
x0, x1, y0, y1 = self.bounding_box
hx = x1 - x0
hy = y1 - y0
ax.set_xlim(x0-0.1*hx, x1+0.1*hx)
ax.set_ylim(y0-0.1*hy, y1+0.1*hy)
for child in self.children.itervalues():
ax = child.plot(ax, set_axis=False)
else:
x0, y0 = self.vertices['SW'].coordinate
x1, y1 = self.vertices['NE'].coordinate
# Plot current cell
plt.plot([x0, x0, x1, x1],[y0, y1, y0, y1],'r.')
points = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
if self.flag:
rect = plt.Polygon(points, fc='r', edgecolor='k')
else:
rect = plt.Polygon(points, fc='w', edgecolor='k')
ax.add_patch(rect)
return ax
def plot_trimesh(self, ax):
"""
Plot triangular mesh
"""
e_conn = self.build_connectivity()
for element in e_conn:
points = []
for node_num in element:
x, y = self.vertex_list[node_num].coordinate
points.append([x,y])
triangle = plt.Polygon(points, fc='w', ec='k')
ax.add_patch(triangle)
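# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical use of the
# Mesh class above.  It assumes Cell.mark() sets the flag that refine() checks
# (mark()/split()/find_leaves() live in grid.cell, which is not shown here).
def _example_refine_and_triangulate():
    """Return the triangles of a small, locally refined mesh."""
    mesh = Mesh(box=[0., 1., 0., 1.], nx=2, ny=2)
    # Mark one leaf cell for refinement, then split all marked leaves.
    mesh.leaves()[0].mark()
    mesh.refine()
    # Enforce the 2:1 rule and triangulate the resulting quadtree.
    mesh.balance_tree()
    mesh.triangulate()
    return mesh.triangles()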
|
|
"""Release data for pythonUtils.
The information of the version is in the version.py file.
"""
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
## Quantify the version
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
def write_version_py(filename=None):
cnt = """\
version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'pythonUtils', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (version))
finally:
a.close()
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for pythonUtils, created during installation by
setup.py.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Development version
dev = %(dev)r
# Format: (name, major, minor, micro, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev': dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
## Mercurial? Change that
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# Then we write a new file.
writefile()
return version
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
hgdir = os.path.join(basedir, '..', '.hg')
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info,\
vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor), '.', str(micro)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, micro, revision)
return date, date_info, version, version_info, vcs_info
## Version information
name = 'pythonUtils'
major = "0"
minor = "0"
micro = "0"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = True
description = """Python package to ease coding task and help."""
long_description = """
This package is a collection of different subpackages that they do not have
connection between each other but the use to complement other codes in python.
They are useful to save time and reduce complexity in other projects in python.
They wrap commonly used python libraries as numpy or pandas to add
functionalities oriented to the tasks I usually do.
"""
## Main author
author = 'T. Gonzalez Quintela'
author_email = '[email protected]'
license = 'MIT'
authors = {'tgquintela': ('T. Gonzalez Quintela', '[email protected]')}
maintainer = ""
maintainer_email = ""
url = ''
download_url = ''
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['math', 'data analysis', 'Mathematics', 'software']
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# Topic information
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Mathematics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
|
|
"""
Models for environments.
"""
import itertools
from collections import defaultdict
from django.db import models
from ..mtmodel import MTModel
class Profile(MTModel):
"""
A set of Environments for a type of product.
For instance, a "browser testing" Profile might be a set of
environments relevant to testing browsers.
"""
name = models.CharField(max_length=200)
def __unicode__(self):
"""Return unicode representation."""
return self.name
@classmethod
def generate(cls, name, *elements, **kwargs):
"""
Create profile of environments as Cartesian product of given elements.
Elements are split by category, and then an environment is generated
for each combination of one element from each category.
"""
by_category = defaultdict(list)
for element in elements:
by_category[element.category].append(element)
new = cls.objects.create(name=name, **kwargs)
for element_list in itertools.product(*by_category.values()):
e = Environment.objects.create(profile=new)
e.elements.add(*element_list)
return new
def clone(self, *args, **kwargs):
"""Clone profile, with environments."""
kwargs.setdefault("cascade", ["environments"])
overrides = kwargs.setdefault("overrides", {})
overrides.setdefault("name", "Cloned: {0}".format(self.name))
return super(Profile, self).clone(*args, **kwargs)
def categories(self):
"""Return an iterable of categories that are part of this profile."""
return Category.objects.filter(
elements__environments__profile=self).distinct().order_by("name")
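# Illustrative sketch (not part of the original module): Profile.generate
# builds one Environment per combination of elements across categories.
# ``Category`` and ``Element`` are defined later in this module; the names
# used here ("OS", "Browser", ...) are made up.
def _example_generate_browser_profile():
    """Create a profile with one environment per (OS, browser) combination."""
    os_cat = Category.objects.create(name="OS")
    browser_cat = Category.objects.create(name="Browser")
    linux = Element.objects.create(name="Linux", category=os_cat)
    osx = Element.objects.create(name="OS X", category=os_cat)
    firefox = Element.objects.create(name="Firefox", category=browser_cat)
    # 2 OS elements x 1 browser element -> 2 environments in the new profile.
    return Profile.generate("Browser testing", linux, osx, firefox)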
class Category(MTModel):
"""
A category of parallel environment elements.
For instance, the category "Operating System" could include "Linux", "OS
X", "Windows"...
"""
name = models.CharField(db_index=True, max_length=200)
def __unicode__(self):
"""Return unicode representation."""
return self.name
class Meta:
ordering = ["name"]
verbose_name_plural = "categories"
# @@@ there should be some way to annotate this onto a queryset efficiently
@property
def deletable(self):
"""Return True if this category can be deleted, otherwise False."""
return not Environment.objects.filter(elements__category=self).exists()
# @@@ this protection should apply to queryset.delete as well
def delete(self, *args, **kwargs):
"""Delete this category, or raise ProtectedError if its in use."""
if not self.deletable:
raise models.ProtectedError(
"Category '{0}' is in use and cannot be deleted.".format(
self.name),
list(Environment.objects.filter(elements__category=self).all())
)
return super(Category, self).delete(*args, **kwargs)
class Element(MTModel):
"""
An individual environment factor (e.g. "OS X" or "English").
"""
name = models.CharField(db_index=True, max_length=200)
category = models.ForeignKey(Category, related_name="elements")
def __unicode__(self):
"""Return unicode representation."""
return self.name
class Meta:
ordering = ["name"]
# @@@ there should be some way to annotate this onto a queryset efficiently
@property
def deletable(self):
"""Return True if this element can be deleted, otherwise False."""
return not self.environments.exists()
# @@@ this protection should apply to queryset.delete as well
def delete(self, *args, **kwargs):
"""Delete this element, or raise ProtectedError if its in use."""
if not self.deletable:
raise models.ProtectedError(
"Element '{0}' is in use and cannot be deleted.".format(
self.name),
list(self.environments.all())
)
return super(Element, self).delete(*args, **kwargs)
class Environment(MTModel):
"""
A collection of elements representing a testing environment.
For instance, an Environment for testing a web application might include
the elements "Firefox 10", "English", "Windows 7".
An Environment containing multiple elements from the same category
(e.g. both "Linux" and "OS X") means that either of those elements matches
this environment: in other words, the test can be run on either Linux or OS
X, it doesn't matter for the purposes of this test.
"""
profile = models.ForeignKey(
Profile, blank=True, null=True, related_name="environments")
elements = models.ManyToManyField(Element, related_name="environments")
def __unicode__(self):
"""Return unicode representation."""
return u", ".join(unicode(e) for e in self.ordered_elements())
class Meta:
permissions = [
(
"manage_environments",
"Can add/edit/delete environments, profiles, etc."
)
]
def ordered_elements(self):
"""All elements in category name order."""
return iter(self.elements.order_by("category__name"))
def clone(self, *args, **kwargs):
"""Clone environment, including element relationships."""
kwargs.setdefault("cascade", ["elements"])
return super(Environment, self).clone(*args, **kwargs)
# @@@ there should be some way to annotate this onto a queryset efficiently
@property
def deletable(self):
"""Return True if this environment can be deleted, otherwise False."""
from moztrap.model import ProductVersion
return not ProductVersion.objects.filter(environments=self).exists()
# @@@ this protection should apply to queryset.delete as well
def delete(self, *args, **kwargs):
"""Delete this environment, or raise ProtectedError if its in use."""
if not self.deletable:
from moztrap.model import ProductVersion
raise models.ProtectedError(
"Environment '{0}' is in use and cannot be deleted.".format(
str(self)),
list(ProductVersion.objects.filter(environments=self).all())
)
return super(Environment, self).delete(*args, **kwargs)
def remove_from_profile(self, user=None):
"""Remove environment from its profile and delete it if not in use."""
if self.deletable:
self.delete(user=user)
else:
self.profile = None
self.save(force_update=True, user=user)
class HasEnvironmentsModel(models.Model):
"""
Base for models that inherit/cascade environments to/from parents/children.
Subclasses should implement ``parent`` property and ``cascade_envs_to``
classmethod.
"""
environments = models.ManyToManyField(
'environments.Environment', related_name="%(class)s")
class Meta:
abstract = True
def save(self, *args, **kwargs):
"""Save instance; new instances get parent environments."""
adding = False
if self.id is None:
adding = True
ret = super(HasEnvironmentsModel, self).save(*args, **kwargs)
if adding and isinstance(self.parent, HasEnvironmentsModel):
self.environments.add(*self.parent.environments.all())
return ret
@property
def parent(self):
"""
The model instance to inherit environments from.
"""
return None
@classmethod
def cascade_envs_to(cls, objs, adding):
"""
Return model instances to cascade env profile changes to.
Return value should be a dictionary mapping model classes to iterables
of model instances to cascade to.
        The ``objs`` arg is a list of objects of this class to cascade from;
        the ``adding`` arg is True if cascading for an addition of envs to the
        profile, False if cascading a removal.
"""
return {}
@classmethod
def _remove_envs(cls, objs, envs):
"""Remove one or environments from one or more objects of this class."""
for model, instances in cls.cascade_envs_to(objs, adding=False).items():
model._remove_envs(instances, envs)
m2m_reverse_name = cls.environments.field.related_query_name()
cls.environments.through._base_manager.filter(
**{
"{0}__in".format(m2m_reverse_name): objs,
"environment__in": envs
}
).delete()
def remove_envs(self, *envs):
"""Remove one or more environments from this object's profile."""
self._remove_envs([self], envs)
def add_envs(self, *envs):
"""Add one or more environments to this object's profile."""
# @@@ optimize this to reduce queries once we have bulk insert in 1.4
self.environments.add(*envs)
for model, instances in self.cascade_envs_to(
[self], adding=True).items():
for instance in instances:
instance.add_envs(*envs)
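# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal subclass of
# HasEnvironmentsModel showing the two hooks its docstring asks subclasses to
# provide.  ``_ExampleRun`` and the ``ExampleSuite`` foreign key are
# hypothetical; the class is abstract so no database table is created.
class _ExampleRun(HasEnvironmentsModel):
    """Hypothetical model that inherits environments from a parent suite."""
    suite = models.ForeignKey("ExampleSuite", related_name="runs")

    class Meta:
        abstract = True

    @property
    def parent(self):
        # save() copies environments from this object when a run is first added.
        return self.suite

    @classmethod
    def cascade_envs_to(cls, objs, adding):
        # Map of model class -> instances to push environment changes down to.
        # Returning {} (the base-class default) means no further cascading.
        return {}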
|
|
# Copyright (c) <2016> <GUANGHAN NING>. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script File: ROLO_network_test_all.py
Description:
ROLO is short for Recurrent YOLO, aimed at simultaneous object detection and tracking
Paper: http://arxiv.org/abs/1607.05781
Author: Guanghan Ning
Webpage: http://guanghan.info/
'''
# Imports
import ROLO_utils as utils
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
import cv2
import numpy as np
import os.path
import time
import random
class ROLO_TF:
disp_console = True
restore_weights = True#False
# YOLO parameters
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
yolo_weights_file = 'weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.2
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img, h_img = [352, 240]
# ROLO Network Parameters
rolo_weights_file = 'null'
# rolo_weights_file = '/u03/Guanghan/dev/ROLO-dev/model_dropout_30.ckpt'
lstm_depth = 3
num_steps = 3 # number of frames as an input sequence
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_gt = 4
    num_input = num_feat + num_predict # data input: 4096 + 6 = 4102
# ROLO Parameters
batch_size = 1
display_step = 1
# tf Graph input
x = tf.placeholder("float32", [None, num_steps, num_input])
istate = tf.placeholder("float32", [None, 2*num_input]) #state & cell => 2x num_input
y = tf.placeholder("float32", [None, num_gt])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_input, num_predict]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_predict]))
}
def __init__(self,argvs = []):
print("ROLO init")
self.ROLO(argvs)
def LSTM_single(self, name, _X, _istate, _weights, _biases):
with tf.device('/gpu:0'):
# input shape: (batch_size, n_steps, n_input)
_X = tf.transpose(_X, [1, 0, 2]) # permute num_steps and batch_size
# Reshape to prepare input to hidden activation
_X = tf.reshape(_X, [self.num_steps * self.batch_size, self.num_input]) # (num_steps*batch_size, num_input)
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(0, self.num_steps, _X) # n_steps * (batch_size, num_input)
cell = tf.nn.rnn_cell.LSTMCell(self.num_input, self.num_input)
state = _istate
for step in range(self.num_steps):
outputs, state = tf.nn.rnn(cell, [_X[step]], state)
tf.get_variable_scope().reuse_variables()
return outputs
# Experiment with dropout
def dropout_features(self, feature, prob):
num_drop = int(prob * 4096)
drop_index = random.sample(xrange(4096), num_drop)
for i in range(len(drop_index)):
index = drop_index[i]
feature[index] = 0
return feature
'''---------------------------------------------------------------------------------------'''
def build_networks(self):
if self.disp_console : print "Building ROLO graph..."
# Build rolo layers
self.lstm_module = self.LSTM_single('lstm_test', self.x, self.istate, self.weights, self.biases)
self.ious= tf.Variable(tf.zeros([self.batch_size]), name="ious")
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
#self.saver.restore(self.sess, self.rolo_weights_file)
if self.disp_console : print "Loading complete!" + '\n'
def testing(self, x_path, y_path):
total_loss = 0
# Use rolo_input for LSTM training
pred = self.LSTM_single('lstm_train', self.x, self.istate, self.weights, self.biases)
#print("pred: ", pred)
self.pred_location = pred[0][:, 4097:4101]
#print("pred_location: ", self.pred_location)
#print("self.y: ", self.y)
self.correct_prediction = tf.square(self.pred_location - self.y)
#print("self.correct_prediction: ", self.correct_prediction)
self.accuracy = tf.reduce_mean(self.correct_prediction) * 100
#print("self.accuracy: ", self.accuracy)
#optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.accuracy) # Adam Optimizer
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
if (self.restore_weights == True):
sess.run(init)
self.saver.restore(sess, self.rolo_weights_file)
print "Loading complete!" + '\n'
else:
sess.run(init)
id = 0 #don't change this
total_time = 0.0
#id= 1
# Keep training until reach max iterations
while id < self.testing_iters - self.num_steps:
# Load training data & ground truth
batch_xs = self.rolo_utils.load_yolo_output_test(x_path, self.batch_size, self.num_steps, id) # [num_of_examples, num_input] (depth == 1)
# Apply dropout to batch_xs
#for item in range(len(batch_xs)):
# batch_xs[item] = self.dropout_features(batch_xs[item], 0.4)
batch_ys = self.rolo_utils.load_rolo_gt_test(y_path, self.batch_size, self.num_steps, id)
batch_ys = utils.locations_from_0_to_1(self.w_img, self.h_img, batch_ys)
                # Reshape data to get 3 sequences of num_input (4102) elements
batch_xs = np.reshape(batch_xs, [self.batch_size, self.num_steps, self.num_input])
batch_ys = np.reshape(batch_ys, [self.batch_size, 4])
#print("Batch_ys: ", batch_ys)
start_time = time.time()
pred_location= sess.run(self.pred_location,feed_dict={self.x: batch_xs, self.y: batch_ys, self.istate: np.zeros((self.batch_size, 2*self.num_input))})
cycle_time = time.time() - start_time
total_time += cycle_time
#print("ROLO Pred: ", pred_location)
#print("len(pred) = ", len(pred_location))
#print("ROLO Pred in pixel: ", pred_location[0][0]*self.w_img, pred_location[0][1]*self.h_img, pred_location[0][2]*self.w_img, pred_location[0][3]*self.h_img)
#print("correct_prediction int: ", (pred_location + 0.1).astype(int))
# Save pred_location to file
utils.save_rolo_output_test(self.output_path, pred_location, id, self.num_steps, self.batch_size)
#sess.run(optimizer, feed_dict={self.x: batch_xs, self.y: batch_ys, self.istate: np.zeros((self.batch_size, 2*self.num_input))})
if id % self.display_step == 0:
# Calculate batch loss
loss = sess.run(self.accuracy, feed_dict={self.x: batch_xs, self.y: batch_ys, self.istate: np.zeros((self.batch_size, 2*self.num_input))})
#print "Iter " + str(id*self.batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) #+ "{:.5f}".format(self.accuracy)
total_loss += loss
id += 1
#print(id)
#print "Testing Finished!"
avg_loss = total_loss/id
print "Avg loss: " + str(avg_loss)
print "Time Spent on Tracking: " + str(total_time)
print "fps: " + str(id/total_time)
#save_path = self.saver.save(sess, self.rolo_weights_file)
#print("Model saved in file: %s" % save_path)
return None
def ROLO(self, argvs):
self.rolo_utils= utils.ROLO_utils()
self.rolo_utils.loadCfg()
self.params = self.rolo_utils.params
arguments = self.rolo_utils.argv_parser(argvs)
if self.rolo_utils.flag_train is True:
self.training(utils.x_path, utils.y_path)
elif self.rolo_utils.flag_track is True:
self.build_networks()
self.track_from_file(utils.file_in_path)
elif self.rolo_utils.flag_detect is True:
self.build_networks()
self.detect_from_file(utils.file_in_path)
else:
print "Default: running ROLO test."
self.build_networks()
evaluate_st = 0
evaluate_ed = 29
for test in range(evaluate_st, evaluate_ed + 1):
[self.w_img, self.h_img, sequence_name, dummy_1, self.testing_iters] = utils.choose_video_sequence(test)
x_path = os.path.join('benchmark/DATA', sequence_name, 'yolo_out/')
y_path = os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
self.output_path = os.path.join('benchmark/DATA', sequence_name, 'rolo_out_test/')
utils.createFolder(self.output_path)
#self.rolo_weights_file = '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_nodrop_30_2.ckpt' #no dropout
#self.rolo_weights_file = '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_online.ckpt'
#self.rolo_weights_file = '/u03/Guanghan/dev/ROLO-dev/output/MOLO/model_MOT.ckpt'
#self.rolo_weights_file = '/u03/Guanghan/dev/ROLO-dev/output/MOLO/model_MOT_0.2.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step6_exp0.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step3_exp1.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step6_exp2.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step3_exp2.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step9_exp2.ckpt'
#self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step1_exp2.ckpt'
self.rolo_weights_file= '/u03/Guanghan/dev/ROLO-dev/output/ROLO_model/model_step3_exp1_old.ckpt'
self.num_steps = 3 # number of frames as an input sequence
print("TESTING ROLO on video sequence: ", sequence_name)
self.testing(x_path, y_path)
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
ROLO_TF(argvs)
if __name__=='__main__':
main(' ')
|
|
#!/usr/bin/env python
import os,sys,tempfile,string,math,re
_palettes = ('gray','rainbow','heat','iraf','aips','pgplot','a','bb','he','i8','ds','cyclic')
_colors = ('w','k','r','g','b','c','m','y','o','gy','gc','bc','bm','rm','dg','lg')
_fills = ('s','h','/','#')
_fonts = ('sf','rm','it','cu')
_lstyles = ('-','--','.-',':','-...')
_symbols = ('s','.','+','*','o','x','^','oplus','odot','ps','d','st','o+','david','arrow')
# Private variables set by _wipopen().
_wipfile = '???' # the temporary wip file that holds all the plot commands
_tmplist = [] # list of any temp files we have made
_optionsobj = None # set to class _options
_panelobj = None # set to class _panel
class _options:
def __init__(self):
# Note, these are all defined in the way WIP would use them.
self.font = '2' # default font (roman)
self.lwidth = '1' # default line width
self.lstyle = '1' # default line style (solid)
self.color = '1' # default color (black)
self.size = '1' # default size
self.bg = '-1' # default background text color, i.e. transparent
self.rgbFlag = False # set to true when rgb values are specified
def update(self,fp,**sargs):
'''Write out any options specified by the user'''
for k in sargs.keys():
if k == 'color':
if isinstance(sargs[k],str):
tmp = self.rgb(fp,sargs[k])
fp.write('color %s\n' %tmp)
elif k == 'font':
fp.write('font %s\n' %_translatefont(sargs[k]))
elif k == 'size':
fp.write('expand %s\n' %sargs[k])
elif k == 'style':
if isinstance(sargs[k],str):
sym = _translatesymbol(sargs[k]) # don't attempt for symbols
if sym == '99':
sym = _translatelstyle(sargs[k])
fp.write('lstyle %s\n' %sym)
elif k == 'width':
fp.write('lwidth %s\n' %sargs[k])
elif k == 'bg':
tmp = self.rgb(fp,sargs[k])
fp.write('bgci %s\n' %tmp)
def rgb(self,fp,color):
'''Handle RGB color conversion'''
tmp = _translatecolor(color)
if tmp == 'rgb':
tmp = color.replace(',',' ')
fp.write('rgb 1 %s\n' %(tmp)) # change color index 1
self.rgbFlag = True
return '1'
else:
if self.rgbFlag: # changed index 1, so change back
fp.write('rgb 1 0 0 0\n')
self.rgbFlag = False
return tmp
def reset(self,fp,**sargs):
'''Reset any options changed by self.update to their defaults'''
if self.rgbFlag:
fp.write('rgb 1 0 0 0\n') # reset color index 1 to black
self.rgbFlag = False
for k in sargs.keys():
if k == 'color': fp.write('color %s\n' %self.color)
elif k == 'fillcolor': fp.write('color %s\n' %self.color)
elif k == 'font': fp.write('font %s\n' %self.font)
elif k == 'size': fp.write('expand %s\n' %self.size)
elif k == 'style': fp.write('lstyle %s\n' %self.lstyle)
elif k == 'width': fp.write('lwidth %s\n' %self.lwidth)
elif k == 'bg': fp.write('bgci %s\n' %self.bg)
def default(self,fp,**sargs):
'''Change the default values'''
for k in sargs.keys():
if k == 'color': self.color = self.rgb(fp,sargs[k])
elif k == 'font': self.font = _translatefont(sargs['font'])
elif k == 'size': self.size = str(sargs['size'])
elif k == 'style': self.lstyle = _translatelstyle(sargs['style'])
elif k == 'width': self.lwidth = str(sargs['width'])
elif k == 'bg':
if sargs[k] == 't': # t for transparent
self.bg = '-1'
else:
                    self.bg = self.rgb(fp,sargs[k])
self.update(fp,**sargs)
class _panel:
def __init__(self):
self.nx = 0 # number of panels in x direction (set by self.resize)
self.ny = 0 # number of panels in y direction (set by self.resize)
self.idx = 0 # index of current panel number
self.gapx = 2 # space between panels in x
self.gapy = 2 # space between panels in y
self.start = 'top' # control if numbering starts in top or bottom left
self.limits = [] # flag whether limits are set for each panel
self.logx = [] # flag whether logx is plotted
self.logy = [] # flag whether logy is plotted
self.image = [] # name of image, if any, for each panel
self.header = [] # header for each image (px, rd, etc)
self.scale = [] # image scaling for each panel (linear,log,sqrt)
self.curves = [] # curves to plot for legend
self.palette = [] # palette used in each panel
self.resize(nx=1,ny=1,gapx=2,gapy=2,start='top')
def get(self,key):
'''Return the specified value for the current panel'''
if key == 'limits' : return self.limits[self.idx]
elif key == 'logx' : return self.logx[self.idx]
elif key == 'logy' : return self.logy[self.idx]
elif key == 'image' : return self.image[self.idx]
elif key == 'scale' : return self.scale[self.idx]
elif key == 'header' : return self.header[self.idx]
elif key == 'palette': return self.palette[self.idx]
else: _warning('_panel(): Invalid key requested: %s' %key)
def writelimits(self,fp,**args):
"""New function for limits"""
if args.has_key('logx'): self.logx[self.idx] = args['logx']
if args.has_key('logy'): self.logy[self.idx] = args['logy']
if args.has_key('limits') and args['limits'] == None:
del(args['limits'])
if args.has_key('limits'):
if args['limits'] == 'last': # use last set limits from other panel
idx = self.idx - 1
while idx >= 0:
if self.limits[idx]:
self.limits[self.idx] = self.limits[idx]
self.logx[self.idx] = self.logx[idx]
self.logy[self.idx] = self.logy[idx]
break
idx = idx - 1
else:
tmp = list(args['limits'])
if self.logx[self.idx]:
## TODO: No warning for changing these values
if tmp[0] == 0: tmp[0] = 1e-5
if tmp[1] == 0: tmp[1] = 1e-5
tmp[0] = math.log10(tmp[0])
tmp[1] = math.log10(tmp[1])
if self.logy[self.idx]:
if tmp[2] == 0: tmp[2] = 1e-5
if tmp[3] == 0: tmp[3] = 1e-5
tmp[2] = math.log10(tmp[2])
tmp[3] = math.log10(tmp[3])
fp.write('set \\1 %g\n' %tmp[0])
fp.write('set \\2 %g\n' %tmp[1])
fp.write('set \\3 %g\n' %tmp[2])
fp.write('set \\4 %g\n' %tmp[3])
self.limits[self.idx] = True
fp.write('limits \\1 \\2 \\3 \\4\n')
elif self.limits[self.idx]: # limits already exist for this panel,
pass # so reuse them
#fp.write('limits \\1 \\2 \\3 \\4\n')
else: # no limits set in this panel, so make new ones
fp.write('limits\n')
fp.write('set \\1 x1\n')
fp.write('set \\2 x2\n')
fp.write('set \\3 y1\n')
fp.write('set \\4 y2\n')
if args.has_key('reversex') and args['reversex']:
fp.write('set \\1 x2\n')
fp.write('set \\2 x1\n')
if args.has_key('reversey') and args['reversey']:
fp.write('set \\1 y2\n')
fp.write('set \\2 y1\n')
if self.logx[self.idx]:
## TODO: No warning for changing these values
fp.write('if (\\1 == 0) set \\1 1e-5\n')
fp.write('if (\\2 == 0) set \\2 1e-5\n')
if self.logy[self.idx]:
fp.write('if (\\3 == 0) set \\3 1e-5\n')
fp.write('if (\\4 == 0) set \\4 1e-5\n')
self.limits[self.idx] = True
fp.write('limits \\1 \\2 \\3 \\4\n')
def resize(self,**args):
'''Change size of a panel, either newly-created, or from panel() cmd'''
if args.has_key('gapx'): self.gapx = args['gapx']
if args.has_key('gapy'): self.gapy = args['gapy']
if args.has_key('start'): self.start = args['start']
if args.has_key('nx'):
nx = args['nx']
else:
nx = self.nx
if args.has_key('ny'):
ny = args['ny']
else:
ny = self.ny
if self.start not in ('top','bottom'):
_error('_panel(): start keyword must be top or bottom!')
for i in range(self.nx*self.ny,nx*ny):
self.limits.append(False)
self.logx.append(False)
self.logy.append(False)
self.image.append(None)
self.scale.append(None)
self.header.append(None)
self.palette.append(None)
self.nx = nx
self.ny = ny
def set(self,**args):
'''Set the specified value for the current panel'''
for k,v in args.iteritems():
if k == 'logx' : self.logx[self.idx] = v
elif k == 'logy' : self.logy[self.idx] = v
elif k == 'image' : self.image[self.idx] = v
elif k == 'scale' : self.scale[self.idx] = v
elif k == 'header' : self.header[self.idx] = v
elif k == 'curve' : self.curves.append(v)
elif k == 'limits' : self.limits[self.idx] = v
elif k == 'palette': self.palette[self.idx] = v
def _checkallowed(funcname,inputargs,allowedargs):
'''Check the list of inputargs keywords against the list of allowedargs'''
extra_args = list(set(inputargs) - set(allowedargs))
if len(extra_args) != 0:
argstring = ' '.join(extra_args)
if len(extra_args) == 1:
_error("%s() does not allow the keyword: %s" %(funcname,argstring))
else:
_error("%s() does not allow the keywords: %s" %(funcname,argstring))
def _count(datafile):
'''Count number of non-comment lines in given datafile and return'''
if os.path.exists(datafile):
fp = open(datafile,'r')
line = fp.readline()
num = 0
while line:
if line[0] != '#':
num = num + 1
line = fp.readline()
fp.close()
return num
else:
_error("_count(): datafile %s does not exist!" %datafile)
return 0
def _error(msg):
'''Print the error message to standard error'''
if msg[-1] == '\n':
sys.stderr.write('### PyWip Error! %s' %msg)
else:
sys.stderr.write('### PyWip Error! %s\n' %msg)
sys.exit()
def _isseq(var):
'''Test whether var is a list or tuple'''
return isinstance(var,(list,tuple))
def _lookup(rcol=1,gcol=2,bcol=3,scol=4,datafile=None,reverse=False):
'''Define a color palette using RGB values.
Note, the halftone() command can use a lookup table directly through
the palette keyword. You probably only really need this command if
you want to specify a lookup table without a datafile.
The red, green, and blue values must be given as fractions between 0
and 1. Same for the scale column (scol). scol defines the rgb color
for fractions of the max values plotted by halftone(). Linear
interpolation for values in the image between specified levels will be
performed. I think WIP has an inherent limit of 255 color levels
(probably something to do with PGPLOT).
rcol,gcol,bcol - Integers or list/tuple of red, green, blue data
scol - Integer or list/tuple of scaling data.
datafile - String name of input data file. Leave as None if
rcol,gcol,bcol, and scol are all sequences of numbers
reverse - Set to True if you want to invert the color lookup
table (like putting a negative sign for palette).'''
## TODO: Can probably simplify this a lot by hard-coding some things.
## I don't think anyone would ever want to call this manually.
fp = _wipopen('_lookup')
if datafile is None:
nr = len(rcol)
ng = len(gcol)
nb = len(bcol)
ns = len(scol)
if nr == ng == nb == ns:
blah = tempfile.mktemp()
_tmplist.append(blah)
fp2 = open(blah,'w')
for r,g,b,s in zip(rcol,gcol,bcol,scol):
fp2.write("%g %g %g %g\n" %(r,g,b,s))
fp2.close()
rcol = 1
gcol = 2
bcol = 3
scol = 4
datafile = blah
else:
_error("_lookup(): You must have equal # of elements for rcol, gcol, bcol, scol!")
else:
fp.write("data %s\n" %datafile)
fp.write("xcol %d\n" %rcol)
fp.write("ycol %d\n" %gcol)
fp.write("ecol %d\n" %bcol)
fp.write("pcol %d\n" %scol)
if reverse is True:
fp.write("lookup -1\n")
else:
fp.write("lookup\n")
fp.close()
def _makecurve(**args):
'''Does all the stuff for adding a curve to the legend(). This does
NOT check for allowed arguments since this function is called
directly by plot() and others which may have additional args.'''
c = {'color' : _colors[int(_optionsobj.color)], 'size' : _optionsobj.size,
'style' : _lstyles[int(_optionsobj.lstyle)-1],
'width' : _optionsobj.lwidth, 'text' : 'Generic Curve',
'fillcolor' : _optionsobj.bg,
'fillsize' : _optionsobj.size,
'fillstyle' : _lstyles[int(_optionsobj.lstyle)-1]}
if args.has_key('style') and args['style'] == None: # don't add style=None
return # to legend
for k in args.keys():
if k == 'text':
if args['text']: # not set to None
c[k] = _translatelatex(args['text'])
else:
return # don't add to list of curves for legend
else:
c[k] = str(args[k])
# to properly set fill factor requires all other args to be parsed first
if args.has_key('fillcolor'):
fillstyle,fillfactor = _translatefillsymbol(args['style'])
c['fillcolor'] = c['fillcolor']
c['fillsize'] = fillfactor*float(c['size'])
c['fillstyle'] = fillstyle
_panelobj.set(curve=c)
def _maketempfile(xcol,ycol,datafile=None,xerr=None,yerr=None,**args):
'''Make a temporary data file for reading by wip.
xcol - either an integer (for a column from datafile) or a list/tuple
ycol - either an integer (for a column from datafile) or a list/tuple
xerr - either an integer (for a column from datafile) or a list/tuple
yerr - either an integer (for a column from datafile) or a list/tuple
datafile - set to a filename to read data from that file'''
global _tmplist
eFlag = False # set to true if color for each point
sFlag = False # set to true if symbol for each point
fFlag = False # set to true if fillcolor for each point
logx = _panelobj.get('logx')
logy = _panelobj.get('logy')
if not datafile:
if xcol == 'NR':
xcol = range(len(ycol))
elif ycol == 'NR':
ycol = range(len(xcol))
n1 = len(xcol)
n2 = len(ycol)
if n1 != n2: _error('_maketempfile(): x and y arrays must be the same length!')
if xerr:
if len(xerr) != n1:
_error('_maketempfile(): xerr array must have same length as x and y arrays!')
if yerr:
if len(yerr) != n1:
_error('_maketempfile(): yerr array must have same length as x and y arrays!')
if args.has_key('color') and _isseq(args['color']):
eFlag = True
ecol = args['color']
if len(ecol) != n1:
_error('_maketempfile(): color array must have same length as x and y arrays!')
if args.has_key('style') and _isseq(args['style']):
sFlag = True
pcol = args['style']
if len(pcol) != n1:
_error('_maketempfile(): style array must have same length as x and y arrays!')
if args.has_key('fillcolor') and _isseq(args['fillcolor']):
fFlag = True
fcol = args['fillcolor']
if len(fcol) != n1:
_error('_maketempfile(): fillcolor array must have same length as x and y arrays!')
elif not os.path.exists(datafile):
_error('_maketempfile(): file %s does not exist for reading!' %datafile)
blah = tempfile.mktemp()
_tmplist.append(blah)
fp2 = open(blah,'w')
idx = 0 # counting for cases where xcol or ycol is NR
if datafile:
fp1 = open(datafile,'r')
line = fp1.readline()
while line:
if line[0] != '#':
tmp = line.split()
if xcol == 'NR':
xtmp = idx
idx += 1
else:
xtmp = tmp[xcol-1]
if ycol == 'NR':
ytmp = idx
idx += 1
else:
ytmp = tmp[ycol-1]
if _panelobj.get('image') and _panelobj.get('header') == 'rd':
fp2.write('%6.6e %6.6e ' %(_translatecoords(xtmp,'ra'),
_translatecoords(ytmp,'dec')))
else:
fp2.write('%s %s ' %(xtmp,ytmp))
if xerr:
_maketemphelper(fp2,float(xtmp),float(tmp[xerr-1]),logx)
if yerr:
_maketemphelper(fp2,float(ytmp),float(tmp[yerr-1]),logy)
fp2.write('\n')
line = fp1.readline()
fp1.close()
else:
for i in range(n1):
fp2.write('%6.6e %6.6e ' %(_translatecoords(xcol[i],'ra'),
_translatecoords(ycol[i],'dec')))
if xerr:
_maketemphelper(fp2,float(xcol[i]),float(xerr[i]),logx)
if yerr:
_maketemphelper(fp2,float(ycol[i]),float(yerr[i]),logy)
if eFlag:
fp2.write('%s ' %(_translatecolor(ecol[i])))
else:
fp2.write('0 ')
if sFlag:
fp2.write('%s ' %(_translatesymbol(pcol[i])))
else:
fp2.write('0 ')
if fFlag:
fp2.write('%s ' %(_translatecolor(fcol[i])))
else:
fp2.write('0 ')
fp2.write('\n')
fp2.close()
return blah
def _maketemphelper(fp,value,error,logFlag):
'''Helper function for _maketempfile that consolidates the code for making
errorbars and log errorbars with WIP. You have to do some extra
gymnastics to make these happen in WIP.'''
if logFlag:
if value == 0:
fp.write('1 ')
else:
fp.write('%6.6e ' %((value+error)/value))
if value == error:
fp.write('1 ')
else:
            # when value-error < 0, the lower errorbar would require the log of a
            # negative number and be drawn incorrectly. We work around this by
            # capping the ratio value/(value-error), which sets the lower errorbar,
            # at 99 (i.e. treating the error as ~99% of the value).
if value - error < 0:
fp.write('%6.6e ' %99)
else:
fp.write('%6.6e ' %(value/(value-error)))
else:
fp.write('%s ' %error)
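# Worked example (a sketch, not part of the original module) of the ratios
# written by _maketemphelper(): with value=100, error=10 and logFlag=True it
# writes (100+10)/100 = 1.1 and 100/(100-10) = 1.111111, i.e. the factors whose
# log10 gives the upper and lower errorbar lengths in log space,
# log10(110)-log10(100) and log10(100)-log10(90). With logFlag=False it simply
# writes the error value itself.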
def _mtext(fp,text,offset=0,align='center',side='top',**args):
'''Combine stuff for using mtext, which is used by xlabel(), ylabel(),
and title()
text - a string of text
offset - offset for text in addition to standard offset (which depends
on the chosen side).
align - alignment for label. Either left, center, or right, or a
number between zero and one. (zero=left, one=right).
side - put text on this side. Options are left, right, top, bottom.
Allowed optional **args:
color - a string giving the color for the title
font - a string giving the font to use for the title
size - a number giving the size for the title
style - a string giving the line style for the title
width - a number giving the width of the lines
bg - background color for text'''
al = _translatealign(align)
if side == 'top':
off = str(2.0 + float(offset))
elif side == 'left':
off = str(2.2 + float(offset))
elif side == 'right':
off = str(2.2 + float(offset))
elif side == 'bottom':
off = str(3.2 + float(offset))
else:
        _error('_mtext(): side keyword must be one of: top, bottom, left, or right!')
# doesn't seem to properly pick-up default parameters that are set, so
# we force them to be written out. TODO: Still a problem?
#_optionsobj.reset(fp,color=1,font=1,size=1,style=1,width=1,bg=1)
# Now override defaults
_optionsobj.update(fp,**args)
fp.write('mtext %c %s 0.5 %s %s\n' %(side[0].upper(),off,al,_translatelatex(text)))
_optionsobj.reset(fp,**args)
def _plotpoints(fp,xlist,ylist,**args):
'''Plot data points from input lists of coordinates.
This function is a helper to the plot command. When there are less
than 10 data points, plot will call this function rather than go through
the process of making a temp file. Like plot(), you can show points
and lines.
fp - The file pointer where wip commands are written
xlist,ylist - Lists or tuples with the x and y positions of points to
plot
Allowed optional **args:
color - If a string, use as the color for every point. If an integer,
read that column from the datafile for color index for each
point.
size - The size for each data point.
style - If a string, use as the symbol or line style. If an integer,
then read from datafile for symbol for each point.
width - Line width'''
_panelobj.set(**args)
_optionsobj.update(fp,**args)
if _panelobj.get('logx'):
for i in range(len(xlist)):
try:
xlist[i] = math.log10(xlist[i])
except ValueError:
_error("_plotpoints(): problem with taking log of %f" %xlist[i])
if _panelobj.get('logy'):
for i in range(len(ylist)):
try:
ylist[i] = math.log10(ylist[i])
except ValueError:
_error("_plotpoints(): problem with taking log of %f" %ylist[i])
_panelobj.writelimits(fp,**args)
if args.has_key('style'):
if args['style'] == None: # skip plotting if style=None
return
else:
sym = _translatesymbol(args['style'])
else:
sym = _translatesymbol('o')
if sym == '99':
line = _translatelstyle(args['style'])
fp.write('lstyle %s\n' %line)
fp.write('move %f %f\n' %(_translatecoords(xlist[0],'ra'),_translatecoords(ylist[0],'dec')))
for i in range(1,len(xlist)):
fp.write('draw %f %f\n' %(_translatecoords(xlist[i],'ra'),_translatecoords(ylist[i],'dec')))
_optionsobj.reset(fp,**args)
else:
fp.write('symbol %s\n' %sym)
for a,b in zip(xlist,ylist):
fp.write('move %f %f\n' %(_translatecoords(a,'ra'),_translatecoords(b,'dec')))
fp.write('dot\n')
_optionsobj.reset(fp,**args)
def _readimage(fp,image):
'''Perform image and subimage commands on a given image name.
Return x and y pixel limits as a tuple.'''
blah = re.findall(r'(\(|\[){1}',image)
if len(blah) == 0:
name = image
else:
name = image[:image.index(blah[0])]
subimage = re.findall(r'\[.*\]',image) # get subimage pixels
planenum = re.findall(r'\([0-9]*\)',image) # get plane number
if len(planenum) > 1:
_error('_readimage(): found more than one plane number!')
elif len(planenum) == 1:
planenum = int(planenum[0][1:-1])
else:
planenum = 1
if os.path.exists(name):
fp.write('image %s %d\n' %(name,planenum))
else:
_error('_readimage(): Image %s does not exist!' %name)
if len(subimage) > 1:
_error('_readimage(): found more than one subimage range!')
elif len(subimage) == 1:
blah = subimage[0][1:-1].split(',') #[1:-1] splits off [] at begin/end
if len(blah) != 2:
_error('_readimage(): You must specify image range as [xmin:xmax,ymin:ymax]!')
try:
blah = tuple(map(int,blah[0].split(':') + blah[1].split(':')))
except ValueError:
_error('_readimage(): Image range must be integer pixel values!')
if len(blah) != 4:
_error('_readimage(): You must specify image range as [xmin:xmax,ymin:ymax]!')
fp.write('subimage %d %d %d %d\n' %blah)
return blah
else:
return None
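# Illustrative sketch (not part of the original module) of the image-string
# syntax parsed by _readimage(); the file name is hypothetical and fp is an
# open wip command file:
#
#     _readimage(fp, 'map.fits')                    # whole image, plane 1
#     _readimage(fp, 'map.fits(2)')                 # plane 2, returns None
#     _readimage(fp, 'map.fits[10:200,20:150](2)')  # writes "image map.fits 2" and
#                                                   # "subimage 10 200 20 150", and
#                                                   # returns (10, 200, 20, 150)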
def _translatealign(align):
'''Take useful alignment string and convert to wip format.'''
if align == 'left': return '0.0'
elif align == 'center': return '0.5'
elif align == 'right': return '1.0'
else:
try:
blah = float(align)
if blah < 0 or blah > 1:
_error('_translatealign(): Invalid alignment. Try left,center,right, or a number')
return align
except ValueError:
_error('_translatealign(): Invalid alignment. Try left,center,right, or a number')
def _translateaxis(**args):
"""Convert **args into wip format box commands."""
xaxis = ''
yaxis = ''
for k,v in args.iteritems():
if k == 'box':
for side in v:
if side == 'bottom': xaxis += 'b'
elif side == 'top': xaxis += 'c'
elif side == 'left': yaxis += 'b'
elif side == 'right': yaxis += 'c'
else: _error('_translateaxis(): unknown side for box: %s' %side)
elif k == 'drawtickx': pass # if True, don't do anything since defaults
elif k == 'drawticky': pass # to draw. if False, again do nothing
elif k == 'firstx':
if not v: xaxis += 'f'
elif k == 'firsty':
if not v: yaxis += 'f'
elif k == 'format':
if len(v) != 2: _error('_translateaxis(): format must have two values!')
if v[0] == 'wcs': xaxis += 'hz'
elif v[0] == 'dec': xaxis += '1'
elif v[0] == 'exp': xaxis += '2'
elif v[0] == 'auto': pass
else: _error('_translateaxis(): unknown format style: %s' %v[0])
if v[1] == 'wcs': yaxis += 'dz'
elif v[1] == 'dec': yaxis += '1'
elif v[1] == 'exp': yaxis += '2'
elif v[1] == 'auto': pass
else: _error('_translateaxis(): unknown format style: %s' %v[1])
elif k == 'gridx':
if v: xaxis += 'g'
elif k == 'gridy':
if v: yaxis += 'g'
elif k == 'logx':
if v: xaxis += 'l'
elif k == 'logy':
if v: yaxis += 'l'
elif k == 'majortickx':
if v: xaxis += 't'
elif k == 'majorticky':
if v: yaxis += 't'
elif k == 'number':
for side in v:
if side == 'bottom': xaxis += 'n'
elif side == 'top': xaxis += 'm'
elif side == 'left': yaxis += 'n'
elif side == 'right': yaxis += 'm'
else: _error('_translateaxis(): unknown side for number: %s' %side)
elif k == 'subtickx':
if v: xaxis += 's'
elif k == 'subticky':
if v: yaxis += 's'
elif k == 'tickstyle':
            if len(v) != 2: _error('_translateaxis(): tickstyle must have two values!')
if v[0] == 'inside': pass # the default
elif v[0] == 'outside': xaxis += 'i'
elif v[0] == 'both': xaxis += 'p'
else: _error('_translateaxis(): unknown tickstyle location: %s' %v[0])
if v[1] == 'inside': pass # the default
elif v[1] == 'outside': yaxis += 'i'
elif v[1] == 'both': yaxis += 'p'
else: _error('_translateaxis(): unknown tickstyle location: %s' %v[1])
elif k == 'verticaly':
if v: yaxis += 'v'
elif k == 'xinterval': pass
elif k == 'yinterval': pass
elif k == 'zerox':
if not v: xaxis += 'o'
elif k == 'zeroy':
if not v: yaxis += 'o'
if xaxis == '': xaxis = '0'
if yaxis == '': yaxis = '0'
return xaxis,yaxis
def _translatecolor(col):
'''Take useful color string and convert to wip format.
Note that for k and w, I assume you have changed your PGPLOT_BACKGROUND
and PGPLOT_FOREGROUND colors so that black and white are switched.'''
try:
return str(list(_colors).index(col))
except ValueError:
junk = str(col)
if junk.startswith('gray'):
try:
junk2 = int(junk[4:])
if junk2 not in range(1,101):
_error('_translatecolor(): Invalid gray index "%s"' %col)
                junk2 = int(round((junk2-1)*2.4141 + 16)) # interpolate to color indices 16-255
                return str(junk2)
except ValueError:
_error('_translatecolor(): Invalid gray color name "%s"' %col)
else:
tmp = junk.split(',')
if len(tmp) == 3: # see if rgb color code
return 'rgb'
else:
_error('_translatecolor(): Invalid color name "%s"' %col)
def _translatecoords(text,coord):
'''Translate ra/dec coordinates into ones useful for WIP'''
if isinstance(text,str): # if a string, assume we have ra/dec coords
tmp = text.split(':')
mul = 3600.0
outval = 0
for x in tmp:
outval = outval + abs(mul*float(x))
mul = mul/60.0
if float(tmp[0]) < 0:
outval = -1*outval
if len(tmp) == 1: # didn't split by :, so assume user input degrees
if coord == 'ra':
                outval = outval/15.0 # convert RA from arcseconds to seconds of time
return outval
else: # If user didn't give a string, assume coordinates are okay as-is
return text
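# Worked example (a sketch, not part of the original module) of the values
# returned by _translatecoords():
#
#     _translatecoords('12:30:00', 'ra')   # 12*3600 + 30*60 = 45000.0
#     _translatecoords('187.5', 'ra')      # degrees input: 187.5*3600/15 = 45000.0
#     _translatecoords('-30:15:00', 'dec') # -> -108900.0 arcseconds
#     _translatecoords(42.0, 'dec')        # non-strings are returned unchanged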
def _translatefill(fill):
'''Take useful fill string and convert to wip format.
This is the type of fill string used for boxes. For filled symbols,
see below.'''
try:
return str(list(_fills).index(fill)+1)
except ValueError:
        _error('_translatefill(): Invalid fill style %s. Try s, h, /, or #.' %fill)
def _translatefillsymbol(style):
'''Translate a symbol style into a fill style (later retranslated by
_translatesymbol). This is for filled symbols. For filling of boxes, see
above'''
if style == 'o': fillstyle = 'fo'
elif style == '^': fillstyle = 'f^'
elif style == 's': fillstyle = 'fs'
elif style == 'st': fillstyle = 'fst'
else: _error('_translatefillsymbol(): Only circles, triangles, squares, and five-point stars can have a fill color!')
if fillstyle in ['fo','fs']:
fillfactor = 1.4
elif fillstyle == 'fst':
fillfactor = 0.8
else:
fillfactor = 0.8
return fillstyle,fillfactor
def _translatefont(fontname):
'''Translate a useful font name into wip.'''
try:
return str(list(_fonts).index(fontname)+1)
except ValueError:
_error('_translatefont(): Invalid font %s. Try rm, it, sf, or cu!' %fontname)
def _translatelatex(latex):
'''Translate latex string into something usable by WIP.'''
greeklatex = (r'\alpha',r'\beta',r'\xi',r'\delta',r'\epsilon',r'\phi',
r'\gamma',r'\theta',r'\iota',r'\kappa',r'\lambda',r'\mu',r'\nu',r'\pi',
r'\psi',r'\rho',r'\sigma',r'\tau',r'\upsilon',r'\omega',r'\chi',r'\eta',
r'\zeta',r'\Xi',r'\Delta',r'\Phi',r'\Gamma',r'\Theta',r'\Lambda',r'\Pi',
r'\Psi',r'\Sigma',r'\Upsilon',r'\Omega')
wiplatex = (r'\ga',r'\gb',r'\gc',r'\gd',r'\ge',r'\gf',r'\gg',r'\gh',r'\gi',
r'\gk',r'\gl',r'\gm',r'\gn',r'\gp',r'\gq',r'\gr',r'\gs',r'\gt',r'\gu',
r'\gw',r'\gx',r'\gy',r'\gz',r'\gC',r'\gD',r'\gF',r'\gG',r'\gH',r'\gL',
r'\gP',r'\gQ',r'\gS',r'\gU',r'\gW')
stack = [] # keep track of super/subscript stuff
if _optionsobj.font == '1':
defaultfont = r'\fn'
elif _optionsobj.font == '2':
defaultfont = r'\fr'
elif _optionsobj.font == '3':
defaultfont = r'\fi'
elif _optionsobj.font == '4':
defaultfont = r'\fs'
else:
        _error('_translatelatex(): Invalid default font: %s!' %_optionsobj.font)
outstr = latex
for g,w in zip(greeklatex,wiplatex):
outstr = outstr.replace(g,w)
i = 0
outstr = outstr.replace(r'\times',r'\x')
outstr = outstr.replace(r'\AA','\A')
outstr = outstr.replace(r'\odot',r'\(2281)')
outstr = outstr.replace(r'\oplus',r'\(2284)')
outstr = outstr.replace(r'\pm',r'\(2233)')
outstr = outstr.replace(r'\geq',r'\(2244)')
outstr = outstr.replace(r'\leq',r'\(2243)')
outstr = outstr.replace(r'#',r'\(733)') #wip thinks pound signs are comments
outstr = outstr.replace(r'\circ',r'\(902)')
outstr = outstr.replace(r'\propto',r'\(2245)')
while i < len(outstr):
if outstr[i:i+2] == '^{':
outstr = outstr[:i] + r'\u' + outstr[i+2:]
i = i + 2
stack.append(r'\d')
elif outstr[i:i+2] == '_{':
outstr = outstr[:i] + r'\d' + outstr[i+2:]
i = i + 2
stack.append(r'\u')
elif outstr[i:i+4] == r'\sf{':
outstr = outstr[:i] + r'\fn' + outstr[i+4:]
i = i + 4
stack.append(defaultfont)
elif outstr[i:i+4] == r'\rm{':
outstr = outstr[:i] + r'\fr' + outstr[i+4:]
i = i + 4
stack.append(defaultfont)
elif outstr[i:i+4] == r'\it{':
outstr = outstr[:i] + r'\fi' + outstr[i+4:]
i = i + 4
stack.append(defaultfont)
elif outstr[i:i+4] == r'\cu{':
outstr = outstr[:i] + r'\fs' + outstr[i+4:]
i = i + 4
stack.append(defaultfont)
elif outstr[i:i+2] == r'\{':
outstr = outstr[:i] + '{' + outstr[i+2:]
i = i + 2
elif outstr[i:i+2] == '\}':
outstr = outstr[:i] + '}' + outstr[i+2:]
i = i + 2
elif outstr[i] == '}':
try:
char = stack.pop()
outstr = outstr[:i] + char + outstr[i+1:]
except IndexError: # emptystack
pass
i = i + 1
else:
i = i + 1
# fix bug where the carat, ^, doesn't render properly with WIP
outstr = outstr.replace(r'^',r'\(756)')
return outstr
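# Illustrative sketch (not part of the original module): a few translations
# performed by _translatelatex():
#
#     _translatelatex(r'\alpha^{2}')     # -> r'\ga\u2\d'
#     _translatelatex(r'T_{eff}')        # -> r'T\deff\u'
#     _translatelatex(r'5\times10^{3}')  # -> r'5\x10\u3\d'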
def _translatelevels(levels,unit):
'''Translate levels which can be a list, tuple, string, or int into something
usable by wip'''
if isinstance(levels,str):
levs = []
blah = levels.split(':')
try:
blah2 = map(float,blah) # convert to floats
except ValueError:
_error('_translatelevels(): Specify levels values as numbers!')
if len(blah2) == 3:
if unit == 'step':
count = int((blah2[1] - blah2[0])/blah2[2])
elif unit == 'nbin':
                count = int(blah2[2])
blah2[2] = (blah2[1] - blah2[0])/blah2[2] # set stepsize
else:
count = 39
blah2[2] = blah2[1]
if count > 39:
_error('_translatelevels(): You cannot plot more than 40 contours!')
elif count < 0:
_error('_translatelevels(): Number of contour levels is negative!')
levs = tuple(blah2[0] + n*blah2[2] for n in range(count+1))
else:
_error('_translatelevels(): Specify levels as val1:val2:val3 !')
return ' '.join(map(str,levs))
elif _isseq(levels):
if len(levels) > 40:
_error('_translatelevels(): You cannot plot more than 40 contours!')
return ' '.join(map(str,levels))
elif isinstance(levels,int):
return levels
else:
_error('_translatelevels(): You must give a list/tuple, string, or integer for the levels command!')
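# Worked example (a sketch, not part of the original module) of the three forms
# accepted by _translatelevels():
#
#     _translatelevels('1:10:1', 'step')    # -> '1.0 2.0 ... 10.0' (ten levels)
#     _translatelevels((2, 4, 8, 16), None) # -> '2 4 8 16'
#     _translatelevels(5, None)             # -> 5 (integers pass through unchanged)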
def _translatepalette(palette):
'''Translate a useful palette string to wip format.'''
palettestr = str(palette)
    negFlag = 1 # set to -1 below if the reversed palette is requested
if palettestr[0] == '-':
negFlag = -1
palettestr = palettestr[1:]
if palettestr in _palettes:
return str(negFlag*(list(_palettes).index(palettestr)+1))
elif palettestr == 'lookup':
return 'lookup'
else:
if not os.path.exists(palettestr):
_error('_translatepalette(): Cannot find lookup table %s!' %palettestr)
return 'lookup'
def _translatelstyle(lstyle):
'''Translate a useful line style into wip.'''
try:
return str(list(_lstyles).index(lstyle)+1)
except ValueError:
_error('_translatelstyle(): Invalid line style %s. Try - , -- , .- , : , or -...' %lstyle)
def _translatesymbol(sym):
'''Take useful symbol string and convert to wip format.'''
symbol = str(sym) # ensure we have a string
if symbol == 's': return '0' # square
elif symbol == '.': return '1' # dot
elif symbol == '+': return '2' # plus sign
elif symbol == '*': return '3' # asterisks
elif symbol == 'o': return '4' # circle
elif symbol == 'x': return '5' # cross
elif symbol == '^': return '7' # triangle
elif symbol == 'oplus': return '8' # circle with plus sign
elif symbol == 'odot': return '9' # circle with dot
elif symbol == 'ps': return '10' # pointed square
elif symbol == 'd': return '11' # diamond
elif symbol == 'st': return '12' # five-point star
elif symbol == 'f^': return '13' # filled triangle
elif symbol == 'o+': return '14' # open plus symbol
elif symbol == 'david': return '15' # star of david
elif symbol == 'fs': return '16' # filled square
elif symbol == 'fo': return '17' # filled circle
elif symbol == 'fst': return '18' # filled five-point star
elif symbol == 'arrow': return '29' # an arrow, or \(29)
else: return '99'
def _vptoxy(fp,x,y,r1,r2):
'''Convert viewport x/y to physical x/y.
x/y - floats of x/y viewport values
r1,r2 - strings of register names to set holding values'''
fp.write(r'set %s ((x2 - x1) * (%s - vx1) / (vx2 - vx1)) + x1' %(r1,x))
fp.write('\n')
fp.write(r'set %s ((y2 - y1) * (%s - vy1) / (vy2 - vy1)) + y1' %(r2,y))
fp.write('\n')
def _warning(msg):
'''Print the warning message to standard error.'''
if msg[-1] == '\n':
sys.stderr.write('### PyWip Warning! %s' %msg)
else:
sys.stderr.write('### PyWip Warning! %s\n' %msg)
def _wipopen(funcname,keys=None,allowed=None):
    '''Open the wip file for writing. If one does not already exist, start a
    new one
    funcname - a string with the name of the calling function
    keys - an optional list of the keyword arguments given by the caller
    allowed - an optional list of allowed keywords'''
    if keys is not None and allowed is not None:
        _checkallowed(funcname,keys,allowed)
global _wipfile,_optionsobj,_panelobj
if _wipfile == '???':
tempfile.tempdir = os.getcwd()
_wipfile = tempfile.mktemp(suffix='.wip')
_optionsobj = _options()
_panelobj = _panel()
fp = open(_wipfile,'w')
fp.write('set print ignore\n')
fp.write('set maxarray 1000000\n') #TODO: does this work?
fp.write('color %s\n' %_optionsobj.color)
fp.write('font %s\n' %_optionsobj.font)
fp.write('expand %s\n' %_optionsobj.size)
fp.write('lstyle %s\n' %_optionsobj.lstyle)
fp.write('lwidth %s\n' %_optionsobj.lwidth)
fp.write('bgci %s\n' %_optionsobj.bg)
fp.write('### Start %s()\n' %funcname)
else:
fp = open(_wipfile,'a')
fp.write('### Start %s()\n' %funcname)
return fp
def _xytovp(fp,x,y,r1,r2):
'''Convert x/y values to viewport values
x,y - x and y coordinates as floats
r1,r2 - strings of registers or variables to set holding values'''
fp.write(r'set %s ((vx2 - vx1) * (%s - x1) / (x2 - x1)) + vx1' %(r1,x))
fp.write('\n')
fp.write(r'set %s ((vy2 - vy1) * (%s - y1) / (y2 - y1)) + vy1' %(r2,y))
fp.write('\n')
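# A minimal pure-Python sketch (not part of the original module) of the linear
# mapping that _vptoxy()/_xytovp() ask WIP to evaluate, with world limits
# (x1, x2) and viewport limits (vx1, vx2):
#
#     def _vp_to_world(vx, x1, x2, vx1, vx2):
#         return (x2 - x1) * (vx - vx1) / (vx2 - vx1) + x1
#
#     def _world_to_vp(x, x1, x2, vx1, vx2):
#         return (vx2 - vx1) * (x - x1) / (x2 - x1) + vx1
#
# e.g. with x1=0, x2=10, vx1=0.2, vx2=0.8 the viewport position 0.5 maps to the
# world value 5.0, and 5.0 maps back to 0.5.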
|
|
"""Functions for creating or importing topologies for experiments.
To create a custom topology, create a function returning an instance of the
`IcnTopology` class. An IcnTopology is simply a subclass of a Topology class
provided by FNSS.
A valid ICN topology must have the following attributes:
* Each node must have one stack among: source, receiver, router
* The topology must have an attribute called `icr_candidates` which is a set
of router nodes on which a cache may be possibly deployed. Caches are not
deployed directly at topology creation, instead they are deployed by a
cache placement algorithm.
"""
from __future__ import division
from os import path
import networkx as nx
import fnss
from icarus.registry import register_topology_factory
__all__ = [
'IcnTopology',
'topology_tree',
'topology_path',
'topology_geant',
'topology_tiscali',
'topology_wide',
'topology_garr',
'topology_rocketfuel_latency'
]
# Delays
# These values are suggested by this Computer Networks 2011 paper:
# http://www.cs.ucla.edu/classes/winter09/cs217/2011CN_NameRouting.pdf
# which is citing as source of this data, measurements from this IMC'06 paper:
# http://www.mpi-sws.org/~druschel/publications/ds2-imc.pdf
INTERNAL_LINK_DELAY = 2
EXTERNAL_LINK_DELAY = 34
# Path where all topologies are stored
TOPOLOGY_RESOURCES_DIR = path.abspath(path.join(path.dirname(__file__),
path.pardir, path.pardir,
'resources', 'topologies'))
class IcnTopology(fnss.Topology):
"""Class modelling an ICN topology
An ICN topology is a simple FNSS Topology with addition methods that
return sets of caching nodes, sources and receivers.
"""
def cache_nodes(self):
"""Return a dictionary mapping nodes with a cache and respective cache
size
Returns
-------
cache_nodes : dict
Dictionary mapping node identifiers and cache size
"""
source_nodes = {v: self.node[v]['stack'][0]
for v in self
if 'stack' in self.node[v]
and 'source' in self.node[v]['stack'][0]
}
cache_nodes = {v: self.node[v]['stack'][1]['cache_size']
for v in self
if 'stack' in self.node[v]
and 'cache_size' in self.node[v]['stack'][1]
}
cache_or_source = dict(source_nodes, **cache_nodes)
for n in cache_or_source:
print n
return cache_or_source
def sources(self):
"""Return a set of source nodes
Returns
-------
sources : set
Set of source nodes
"""
return set(v for v in self
if 'stack' in self.node[v]
and self.node[v]['stack'][0] == 'source')
def receivers(self):
"""Return a set of receiver nodes
Returns
-------
receivers : set
Set of receiver nodes
"""
return set(v for v in self
if 'stack' in self.node[v]
and self.node[v]['stack'][0] == 'receiver')
@register_topology_factory('TREE')
def topology_tree(k, h, delay=1, **kwargs):
"""Returns a tree topology, with a source at the root, receivers at the
leafs and caches at all intermediate nodes.
Parameters
----------
    k : int
        The branching factor of the tree
    h : int
        The height of the tree
delay : float
The link delay in milliseconds
Returns
-------
topology : IcnTopology
The topology object
"""
topology = fnss.k_ary_tree_topology(k, h)
receivers = [v for v in topology.nodes_iter()
if topology.node[v]['depth'] == h]
sources = [v for v in topology.nodes_iter()
if topology.node[v]['depth'] == 0]
routers = [v for v in topology.nodes_iter()
if topology.node[v]['depth'] > 0
and topology.node[v]['depth'] < h]
topology.graph['icr_candidates'] = set(routers)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, delay, 'ms')
# label links as internal
for u, v in topology.edges_iter():
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
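# Illustrative sketch (not part of the original module), assuming
# fnss.k_ary_tree_topology(2, 3) builds the complete binary tree of height 3
# (15 nodes): the factory above then yields 1 source at the root, 8 receivers
# at the leaves and 6 intermediate routers, all of which are ICR candidates:
#
#     topo = topology_tree(2, 3, delay=2)
#     len(topo.receivers())               # -> 8
#     len(topo.graph['icr_candidates'])   # -> 6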
@register_topology_factory('PATH')
def topology_path(n, delay=1, **kwargs):
"""Return a path topology with a receiver on node `0` and a source at node
'n-1'
Parameters
----------
n : int (>=3)
The number of nodes
delay : float
The link delay in milliseconds
Returns
-------
topology : IcnTopology
The topology object
"""
topology = fnss.line_topology(n)
receivers = [0]
routers = range(1, n-1)
sources = [n-1]
topology.graph['icr_candidates'] = set(routers)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, delay, 'ms')
# label links as internal or external
for u, v in topology.edges_iter():
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
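# Usage sketch (not part of the original module): querying the IcnTopology
# returned by the PATH factory above.
#
#     topo = topology_path(5, delay=1)
#     topo.sources()                  # -> set([4])
#     topo.receivers()                # -> set([0])
#     topo.graph['icr_candidates']    # -> set([1, 2, 3])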
@register_topology_factory('GEANT')
def topology_geant(**kwargs):
"""Return a scenario based on GEANT topology
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
    # main connected component of the GEANT 2012 topology
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR,
'Geant2012.graphml')
).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
deg = nx.degree(topology)
receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
icr_candidates = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
# attach sources to topology
source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
sources = []
for v in source_attachments:
u = v + 1000 # node ID of source
topology.add_edge(v, u)
sources.append(u)
routers = [v for v in topology.nodes() if v not in sources + receivers]
# add stacks to nodes
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# label links as internal or external
for u, v in topology.edges_iter():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('TISCALI')
def topology_tiscali(**kwargs):
"""Return a scenario based on Tiscali topology, parsed from RocketFuel dataset
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
# 240 nodes in the main component
topology = fnss.parse_rocketfuel_isp_map(path.join(TOPOLOGY_RESOURCES_DIR,
'3257.r0.cch')
).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
# degree of nodes
deg = nx.degree(topology)
# nodes with degree = 1
onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    # we select as caches the nodes with the highest degree,
    # using a minimum degree of 6 --> 36 nodes
    # If we changed the minimum degree, these would be the resulting numbers of caches:
# Min degree N caches
# 2 160
# 3 102
# 4 75
# 5 50
# 6 36
# 7 30
# 8 26
# 9 19
# 10 16
# 11 12
# 12 11
# 13 7
# 14 3
# 15 3
# 16 2
icr_candidates = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    # sources are nodes with degree 1 whose neighbor has degree of at least 5
    # we assume that sources are nodes connected to a hub
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]  # 44 nodes
    # receivers are nodes with degree 1 whose neighbor has degree of at most 4
    # we assume that receivers are nodes not well connected to the network
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]  # 36 nodes
# we set router stacks because some strategies will fail if no stacks
# are deployed
routers = [v for v in topology.nodes() if v not in sources + receivers]
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# Deploy stacks
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# label links as internal or external
for u, v in topology.edges():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('WIDE')
def topology_wide(**kwargs):
"""Return a scenario based on GARR topology
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'WideJpn.graphml')).to_undirected()
# sources are nodes representing neighbouring AS's
sources = [9, 8, 11, 13, 12, 15, 14, 17, 16, 19, 18]
# receivers are internal nodes with degree = 1
receivers = [27, 28, 3, 5, 4, 7]
# caches are all remaining nodes --> 27 caches
routers = [n for n in topology.nodes() if n not in receivers + sources]
    # All routers can be upgraded to ICN functionalities
icr_candidates = routers
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# Deploy stacks
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# label links as internal or external
for u, v in topology.edges():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms',[(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('GARR')
def topology_garr(**kwargs):
"""Return a scenario based on GARR topology
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'Garr201201.graphml')).to_undirected()
# sources are nodes representing neighbouring AS's
sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
# receivers are internal nodes with degree = 1
receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
# caches are all remaining nodes --> 27 caches
routers = [n for n in topology.nodes() if n not in receivers + sources]
icr_candidates = routers
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# Deploy stacks
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# label links as internal or external
for u, v in topology.edges():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms',[(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('GARR_2')
def topology_garr2(**kwargs):
"""Return a scenario based on GARR topology.
    Differently from the plain GARR scenario, in this topology some receivers
    are attached to routers, and only the subset of routers that actually lie
    on the path of some traffic are selected to become ICN routers. These
    changes make this topology more realistic.
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'Garr201201.graphml')).to_undirected()
# sources are nodes representing neighbouring AS's
sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
# receivers are internal nodes with degree = 1
receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
# routers are all remaining nodes --> 27 caches
routers = [n for n in topology.nodes_iter() if n not in receivers + sources]
artificial_receivers = list(range(1000, 1000 + len(routers)))
for i in range(len(routers)):
topology.add_edge(routers[i], artificial_receivers[i])
receivers += artificial_receivers
# Caches to nodes with degree > 3 (after adding artificial receivers)
degree = nx.degree(topology)
icr_candidates = [n for n in topology.nodes_iter() if degree[n] > 3.5]
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# Deploy stacks
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# label links as internal or external
for u, v in topology.edges():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms',[(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('GEANT_2')
def topology_geant2(**kwargs):
"""Return a scenario based on GEANT topology.
    Differently from the plain GEANT scenario, in this topology some receivers
    are attached to routers, and only the subset of routers that actually lie
    on the path of some traffic are selected to become ICN routers. These
    changes make this topology more realistic.
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
# 53 nodes
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR,
'Geant2012.graphml')
).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
deg = nx.degree(topology)
receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
# attach sources to topology
source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
sources = []
for v in source_attachments:
u = v + 1000 # node ID of source
topology.add_edge(v, u)
sources.append(u)
routers = [v for v in topology.nodes() if v not in sources + receivers]
# Put caches in nodes with top betweenness centralities
betw = nx.betweenness_centrality(topology)
routers = sorted(routers, key=lambda k: betw[k])
# Select as ICR candidates the top 50% routers for betweenness centrality
icr_candidates = routers[len(routers)//2:]
# add stacks to nodes
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# label links as internal or external
for u, v in topology.edges_iter():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('TISCALI_2')
def topology_tiscali2(**kwargs):
"""Return a scenario based on Tiscali topology, parsed from RocketFuel dataset
    Differently from the plain Tiscali scenario, in this topology some receivers
    are attached to routers, and only the subset of routers that actually lie
    on the path of some traffic are selected to become ICN routers. These
    changes make this topology more realistic.
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
# 240 nodes in the main component
topology = fnss.parse_rocketfuel_isp_map(path.join(TOPOLOGY_RESOURCES_DIR,
'3257.r0.cch')
).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
# degree of nodes
deg = nx.degree(topology)
# nodes with degree = 1
onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    # we select as caches the nodes with the highest degree,
    # using a minimum degree of 6 --> 36 nodes
    # If we changed the minimum degree, these would be the resulting numbers of caches:
# Min degree N caches
# 2 160
# 3 102
# 4 75
# 5 50
# 6 36
# 7 30
# 8 26
# 9 19
# 10 16
# 11 12
# 12 11
# 13 7
# 14 3
# 15 3
# 16 2
icr_candidates = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    # Add and remove some caches to adjust the betweenness centrality of the cache set
for i in [181, 208, 211, 220, 222, 250, 257]:
icr_candidates.remove(i)
icr_candidates.extend([232, 303, 326, 363, 378])
    # sources are nodes with degree 1 whose neighbor has degree of at least 5
    # we assume that sources are nodes connected to a hub
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]  # 44 nodes
    # receivers are nodes with degree 1 whose neighbor has degree of at most 4
    # we assume that receivers are nodes not well connected to the network
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]  # 36 nodes
# we set router stacks because some strategies will fail if no stacks
# are deployed
routers = [v for v in topology.nodes() if v not in sources + receivers]
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# deploy stacks
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# label links as internal or external
for u, v in topology.edges():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
@register_topology_factory('ROCKET_FUEL')
def topology_rocketfuel_latency(asn, source_ratio=0.1, ext_delay=EXTERNAL_LINK_DELAY, **kwargs):
"""Parse a generic RocketFuel topology with annotated latencies
    An artificial receiver node is attached to each node of the parsed
    topology. A source node is additionally attached to each of the routers
    with the highest degree.
Parameters
----------
asn : int
AS number
source_ratio : float
Ratio between number of source nodes (artificially attached) and routers
ext_delay : float
        Delay of external links, in milliseconds
"""
if source_ratio < 0 or source_ratio > 1:
        raise ValueError('source_ratio must be between 0 and 1')
f_topo = path.join(TOPOLOGY_RESOURCES_DIR, 'rocketfuel-latency', str(asn), 'latencies.intra')
topology = fnss.parse_rocketfuel_isp_latency(f_topo).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
    # First mark all current links as internal
for u,v in topology.edges_iter():
topology.edge[u][v]['type'] = 'internal'
    # Note: there is no need to filter out degree-1 nodes here because all nodes
    # have a higher degree; the degree is computed only to decide where to attach sources
routers = topology.nodes()
# Source attachment
n_sources = int(source_ratio*len(routers))
sources = ['src_%d' % i for i in range(n_sources)]
deg = nx.degree(topology)
    # Attach sources purely by node degree; note that they may end up quite clustered
routers = sorted(routers, key=lambda k: deg[k], reverse=True)
for i in range(len(sources)):
topology.add_edge(sources[i], routers[i], delay=ext_delay, type='external')
# Here let's try attach them via cluster
# clusters = compute_clusters(topology, n_sources, distance=None, n_iter=1000)
# source_attachments = [max(cluster, key=lambda k: deg[k]) for cluster in clusters]
# for i in range(len(sources)):
# topology.add_edge(sources[i], source_attachments[i], delay=ext_delay, type='external')
# attach artificial receiver nodes to ICR candidates
receivers = ['rec_%d' % i for i in range(len(routers))]
for i in range(len(routers)):
topology.add_edge(receivers[i], routers[i], delay=0, type='internal')
# Set weights to latency values
for u, v in topology.edges_iter():
topology.edge[u][v]['weight'] = topology.edge[u][v]['delay']
# Deploy stacks on nodes
topology.graph['icr_candidates'] = set(routers)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
return IcnTopology(topology)
@register_topology_factory('SIX_NODE')
def topology_fiveNode(**kwargs):
"""Return a scenario based on Five_Node topology.
This functions the similar as the GEANT topology but with only 5 nodes
All routers are given caches
Sources are added on initilization in addition to the main network to all
nodes with 2 connections
Parameters
----------
seed : int, optional
The seed used for random number generation
Returns
-------
topology : fnss.Topology
The topology object
"""
    # 6 nodes (SixNode.graphml)
topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR,
'SixNode.graphml')
).to_undirected()
topology = list(nx.connected_component_subgraphs(topology))[0]
deg = nx.degree(topology)
    receivers = [v for v in topology.nodes() if deg[v] == 1]  # degree-1 nodes
    # attach sources to topology
    source_attachments = [v for v in topology.nodes() if deg[v] == 2]  # degree-2 nodes
sources = []
for v in source_attachments:
u = v + 1000 # node ID of source
topology.add_edge(v, u)
sources.append(u)
routers = [v for v in topology.nodes() if v not in sources + receivers]
# Put caches in nodes with top betweenness centralities
betw = nx.betweenness_centrality(topology)
routers = sorted(routers, key=lambda k: betw[k])
# Select as ICR candidates all routers
icr_candidates = routers
# add stacks to nodes
topology.graph['icr_candidates'] = set(icr_candidates)
for v in sources:
fnss.add_stack(topology, v, 'source')
for v in receivers:
fnss.add_stack(topology, v, 'receiver')
for v in routers:
fnss.add_stack(topology, v, 'router')
# set weights and delays on all links
fnss.set_weights_constant(topology, 1.0)
fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
# label links as internal or external
for u, v in topology.edges_iter():
if u in sources or v in sources:
topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
fnss.set_weights_constant(topology, 1000.0, [(u, v)])
fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
else:
topology.edge[u][v]['type'] = 'internal'
return IcnTopology(topology)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = variables.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
variables.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass an empty array and expect it to remain empty after calling
# fit and evaluate. This requires the implementation to copy the array
# internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertSameElements(
['bogus_lookup', 'feature'],
graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS))
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 were subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
|
|
from random import choice
from django.db import connection
from django.db.models import get_model
from django.test import TestCase
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.template.loader import Template, Context
from actstream.models import Action, Follow, model_stream, user_stream,\
setup_generic_relations, following, followers
from actstream.actions import follow, unfollow
from actstream.exceptions import ModelNotActionable
from actstream.signals import action
from actstream.settings import get_models, SETTINGS
class LTE(int):
def __new__(cls, n):
obj = super(LTE, cls).__new__(cls, n)
obj.n = n
return obj
def __eq__(self, other):
return other <= self.n
def __repr__(self):
return "<= %s" % self.n
class ActivityBaseTestCase(TestCase):
actstream_models = ()
def setUp(self):
self.old_models = get_models()
SETTINGS['MODELS'] = {}
for model in self.actstream_models:
SETTINGS['MODELS'][model.lower()] = get_model(*model.split('.'))
setup_generic_relations()
def tearDown(self):
SETTINGS['MODELS'] = self.old_models
class ActivityTestCase(ActivityBaseTestCase):
urls = 'actstream.urls'
actstream_models = ('auth.User', 'auth.Group', 'sites.Site')
def setUp(self):
super(ActivityTestCase, self).setUp()
self.group = Group.objects.create(name='CoolGroup')
self.user1 = User.objects.get_or_create(username='admin')[0]
self.user1.set_password('admin')
self.user1.is_superuser = self.user1.is_staff = True
self.user1.save()
self.user2 = User.objects.get_or_create(username='Two')[0]
# User1 joins group
self.user1.groups.add(self.group)
action.send(self.user1, verb='joined', target=self.group)
# User1 follows User2
follow(self.user1, self.user2)
# User2 joins group
self.user2.groups.add(self.group)
action.send(self.user2, verb='joined', target=self.group)
# User2 follows group
follow(self.user2, self.group)
# User1 comments on group
# Use a Site object here and predict its __unicode__ method output
action.send(self.user1, verb='commented on', target=self.group)
self.comment = Site.objects.create(
domain="admin: Sweet Group!...")
# Group responds to comment
action.send(self.group, verb='responded to', target=self.comment)
def test_aauser1(self):
self.assertEqual(map(unicode, self.user1.actor_actions.all()), [
u'admin commented on CoolGroup 0 minutes ago',
u'admin started following Two 0 minutes ago',
u'admin joined CoolGroup 0 minutes ago',
])
def test_user2(self):
self.assertEqual(map(unicode, Action.objects.actor(self.user2)), [
u'Two started following CoolGroup 0 minutes ago',
u'Two joined CoolGroup 0 minutes ago',
])
def test_group(self):
self.assertEqual(map(unicode, Action.objects.actor(self.group)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_following(self):
self.assertEqual(list(following(self.user1)), [self.user2])
self.assertEqual(len(following(self.user2, User)), 0)
def test_followers(self):
self.assertEqual(list(followers(self.group)), [self.user2])
def test_empty_follow_stream(self):
unfollow(self.user1, self.user2)
self.assert_(not user_stream(self.user1))
def test_stream(self):
self.assertEqual(map(unicode, Action.objects.user(self.user1)), [
u'Two started following CoolGroup 0 minutes ago',
u'Two joined CoolGroup 0 minutes ago',
])
self.assertEqual(map(unicode, Action.objects.user(self.user2)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_stream_stale_follows(self):
"""
Action.objects.user() should ignore Follow objects with stale actor
references.
"""
self.user2.delete()
self.assert_(not 'Two' in str(Action.objects.user(self.user1)))
def test_rss(self):
rss = self.client.get('/feed/').content
self.assert_(rss.startswith('<?xml version="1.0" encoding="utf-8"?>\n'
'<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">'))
self.assert_(rss.find('Activity feed for your followed actors') > -1)
def test_atom(self):
atom = self.client.get('/feed/atom/').content
self.assert_(atom.startswith('<?xml version="1.0" encoding="utf-8"?>\n'
'<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="%s">' %
settings.LANGUAGE_CODE))
self.assert_(atom.find('Activity feed for your followed actors') > -1)
def test_action_object(self):
action.send(self.user1, verb='created comment',
action_object=self.comment, target=self.group)
created_action = Action.objects.get(verb='created comment')
self.assertEqual(created_action.actor, self.user1)
self.assertEqual(created_action.action_object, self.comment)
self.assertEqual(created_action.target, self.group)
self.assertEqual(unicode(created_action),
u'admin created comment admin: Sweet Group!... on CoolGroup 0 '
'minutes ago')
def test_doesnt_generate_duplicate_follow_records(self):
g = Group.objects.get_or_create(name='DupGroup')[0]
s = User.objects.get_or_create(username='dupuser')[0]
f1 = follow(s, g)
self.assertTrue(f1 is not None, "Should have received a new follow "
"record")
self.assertTrue(isinstance(f1, Follow), "Returns a Follow object")
self.assertEquals(1, Follow.objects.filter(user=s, object_id=g.pk,
content_type=ContentType.objects.get_for_model(g)).count(),
"Should only have 1 follow record here")
f2 = follow(s, g)
self.assertEquals(1, Follow.objects.filter(user=s, object_id=g.pk,
content_type=ContentType.objects.get_for_model(g)).count(),
"Should still only have 1 follow record here")
self.assertTrue(f2 is not None, "Should have received a Follow object")
self.assertTrue(isinstance(f2, Follow), "Returns a Follow object")
self.assertEquals(f1, f2, "Should have received the same Follow "
"object that I first submitted")
def test_y_no_orphaned_follows(self):
follows = Follow.objects.count()
self.user2.delete()
self.assertEqual(follows - 1, Follow.objects.count())
def test_z_no_orphaned_actions(self):
actions = self.user1.actor_actions.count()
self.user2.delete()
self.assertEqual(actions - 1, self.user1.actor_actions.count())
def test_generic_relation_accessors(self):
self.assertEqual(self.user2.actor_actions.count(), 2)
self.assertEqual(self.user2.target_actions.count(), 1)
self.assertEqual(self.user2.action_object_actions.count(), 0)
def test_bad_actionable_model(self):
self.assertRaises(ModelNotActionable, follow, self.user1,
ContentType.objects.get_for_model(self.user1))
def test_hidden_action(self):
action = self.user1.actor_actions.all()[0]
action.public = False
action.save()
self.assert_(not action in self.user1.actor_actions.public())
def test_tag_follow_url(self):
src = '{% load activity_tags %}{% follow_activity_url user %}'
output = Template(src).render(Context({'user': self.user1}))
ct = ContentType.objects.get_for_model(User)
self.assertEqual(output, '/follow/%s/%s/' % (ct.pk, self.user1.pk))
def test_model_actions_with_kwargs(self):
"""
Testing the model_actions method of the ActionManager
by passing kwargs
"""
self.assertEqual(map(unicode, model_stream(self.user1, verb='commented on')), [
u'admin commented on CoolGroup 0 minutes ago',
])
def test_user_stream_with_kwargs(self):
"""
Testing the user method of the ActionManager by passing additional
filters in kwargs
"""
self.assertEqual(map(unicode, Action.objects.user(self.user1, verb='joined')), [
u'Two joined CoolGroup 0 minutes ago',
])
def test_is_following_filter(self):
src = '{% load activity_tags %}{% if user|is_following:group %}yup{% endif %}'
self.assertEqual(Template(src).render(Context({
'user': self.user2, 'group': self.group
})), u'yup')
self.assertEqual(Template(src).render(Context({
'user': self.user1, 'group': self.group
})), u'')
class ZombieTest(ActivityBaseTestCase):
actstream_models = ('auth.User',)
human = 10
zombie = 1
def setUp(self):
super(ZombieTest, self).setUp()
settings.DEBUG = True
player_generator = lambda n, count: [User.objects.create(
username='%s%d' % (n, i)) for i in range(count)]
self.humans = player_generator('human', self.human)
self.zombies = player_generator('zombie', self.zombie)
self.zombie_apocalypse()
def tearDown(self):
settings.DEBUG = False
super(ZombieTest, self).tearDown()
def zombie_apocalypse(self):
humans = self.humans[:]
zombies = self.zombies[:]
while humans:
for z in self.zombies:
victim = choice(humans)
humans.remove(victim)
zombies.append(victim)
action.send(z, verb='killed', target=victim)
if not humans:
break
def check_query_count(self, queryset):
ci = len(connection.queries)
result = list([map(unicode, (x.actor, x.target, x.action_object))
for x in queryset])
self.assertTrue(len(connection.queries) - ci <= 4,
'Too many queries, got %d expected no more than 4' %
len(connection.queries))
return result
def test_query_count(self):
queryset = model_stream(User)
result = self.check_query_count(queryset)
self.assertEqual(len(result), 10)
def test_query_count_sliced(self):
queryset = model_stream(User)[:5]
result = self.check_query_count(queryset)
self.assertEqual(len(result), 5)
class GFKManagerTestCase(TestCase):
def setUp(self):
self.user_ct = ContentType.objects.get_for_model(User)
self.group_ct = ContentType.objects.get_for_model(Group)
self.group, _ = Group.objects.get_or_create(name='CoolGroup')
self.user1, _ = User.objects.get_or_create(username='admin')
self.user2, _ = User.objects.get_or_create(username='Two')
self.user3, _ = User.objects.get_or_create(username='Three')
self.user4, _ = User.objects.get_or_create(username='Four')
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user2.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user3.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user4.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='joined',
target_content_type=self.group_ct,
target_object_id=self.group.id
)
def test_fetch_generic_relations(self):
# baseline without fetch_generic_relations
_actions = Action.objects.filter(actor_content_type=self.user_ct,
actor_object_id=self.user1.id)
actions = lambda: _actions._clone()
num_content_types = len(set(actions().values_list(
'target_content_type_id', flat=True)))
n = actions().count()
# compare to fetching only 1 generic relation
self.assertNumQueries(LTE(n + 1),
lambda: [a.target for a in actions()])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [a.target for a in
actions().fetch_generic_relations('target')])
action_targets = [(a.id, a.target) for a in actions()]
action_targets_fetch_generic = [(a.id, a.target) for a in
actions().fetch_generic_relations('target')]
self.assertEqual(action_targets, action_targets_fetch_generic)
# compare to fetching all generic relations
num_content_types = len(set(sum(actions().values_list(
'actor_content_type_id', 'target_content_type_id'), ())))
self.assertNumQueries(LTE(2 * n + 1),
lambda: [(a.actor, a.target) for a in actions()])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [(a.actor, a.target) for a in
actions().fetch_generic_relations()])
action_actor_targets = [(a.id, a.actor, a.target) for a in actions()]
action_actor_targets_fetch_generic_all = [
(a.id, a.actor, a.target) for a in
actions().fetch_generic_relations()]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_all)
# fetch only 1 generic relation, but access both gfks
self.assertNumQueries(LTE(n + num_content_types + 2),
lambda: [(a.actor, a.target) for a in
actions().fetch_generic_relations('target')])
action_actor_targets_fetch_generic_target = [
(a.id, a.actor, a.target) for a in
actions().fetch_generic_relations('target')]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_target)
|
|
#!/usr/bin/env python3
from runtime import EspNone, EspList, EspString, EspDict
import re
from multimethod import multimethod
def join(sep, v):
return sep.join(str(x) for x in v if x is not None)
COLOR = True
if COLOR:
num = '\033[38;5;202m%s\033[0m'
color = {
str: '\033[38;5;247;4m%r\033[0m',
EspString: '\033[38;5;247m%r\033[0m',
bool: '\033[38;5;202m%s\033[0m',
int: num, float: num,
type(EspNone): '\033[38;5;172m%s\033[0m',
"var": '\033[38;5;228m%s\033[0m',
"op": '\033[38;5;33m%s\033[0m'
}
else:
color = {
str: "%s", bool: "%s", int: "%s", float: "%s",
type(EspNone): "%s", "var": "%s"
}
SOL = re.compile(r"^", flags=re.M)
def indent(x):
return re.sub(SOL, ' ', x)
def subsexp(ex, before, after):
nl = False
for x in ex:
if x == ...:
nl = True
elif nl:
after.append(sexp(x))
else:
before.append(sexp(x))
b = join(' ', before)
if len(after):
a = indent(join('\n', after))
ba = join('\n', [b, a]) if a else b
else:
ba = b
return ba
def sexp(v):
ex = v.sexp() if isinstance(v, Expr) else v
tex = type(ex)
if ex is None:
pass
elif tex is str:
return color[str]%ex
elif tex is EspString:
return color[EspString]%ex
elif tex in color:
return color[tex]%ex
elif tex is tuple:
# Special colorations
if ex:
if ex[0] == "var":
return color['var']%ex[1]
before = [color['op']%ex[0]]
else:
before = []
after = []
return f"({subsexp(ex[1:], before, after)})"
elif tex is list or tex is EspList:
return f"[{subsexp(tuple(ex), [], [])}]"
else:
raise TypeError(f"Unknown value in sexp {type(v).__name__} {v}")
def is_expr(*x):
return all(isinstance(e, Expr) for e in x)
class Expr:
lvalue = True
rvalue = True
statement = False
def __init__(self):
self.origin = None
def __repr__(self):
raise NotImplementedError("__repr__")
def visit(self, v):
# Give it a name for better stack traces
visit_method = getattr(v, f"visit_{type(self).__name__.lower()}")
return visit_method(self)
def set_origin(self, token):
self.origin = token
return self
def make_expr(self):
'''
Signals to this and all subexpressions that it's being used as an
expression
'''
self.statement = False
def sexp(self):
'''
Return the expression as an S-expression. Ellipses are used to
indicate where the rest of the arguments should be separated by
newlines
'''
raise NotImplementedError("sexp")
class Statement(Expr):
'''
Expressions which don't automatically return if they're the last in a
function body.
'''
statement = True
class Value(Expr):
'''Value'''
def __init__(self, value):
super().__init__()
assert(not is_expr(value))
# Convert Pythonic values to espresso values
tv = type(value)
if value is None:
value = EspNone
elif tv is str:
value = EspString(value)
elif tv is list:
value = EspList(value)
elif tv is dict:
value = EspDict(value)
self.value = value
self.lvalue = False
def __str__(self):
return sexp(self.value)
def __repr__(self):
return f"Value({self.value!r})"
def sexp(self):
#raise ValueError("Sexp")
return self.value
class Var(Expr):
'''Variable'''
def __init__(self, name, mutable=True):
super().__init__()
if name and type(name) != str:
raise TypeError(f"Var name must be str, got {type(name)}")
self.name = name
self.mutable = mutable
def __str__(self):
return sexp(self)
def __repr__(self):
if self.mutable:
return f"Var({self.name!r})"
return f"Var({self.name!r}, mutable={self.mutable!r})"
def sexp(self):
return ("var", self.name, self.mutable or None)
class Spread(Expr):
'''Spread operator, has its own node because it's syntax'''
rvalue = False
def __init__(self, var):
super().__init__()
assert(is_expr(var))
self.var = var
self.lvalue = var.lvalue
var.make_expr()
def __str__(self):
return "..." + sexp(self.var)
def __repr__(self):
return f"Spread({self.var!r})"
def sexp(self):
return ("...", self.var)
class Assign(Statement):
'''Assignment is syntactic too'''
def __init__(self, name, value, op=""):
super().__init__()
assert(is_expr(name))
assert(is_expr(value))
assert(type(op) is str)
self.name = name
self.value = value
self.op = op
value.make_expr()
def __str__(self):
return sexp((f"assign{self.op or ''}=", self.name, self.value))
def __repr__(self):
if self.op:
return f"Assign({self.name!r}, {self.value!r}, {self.op!r})"
else:
return f"Assign({self.name!r}, {self.value!r})"
def sexp(self):
return (self.op + '=', self.name, self.value)
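# Hedged example of the op field (the surface syntax here is a guess, not
# confirmed by this file): a compound assignment such as `x += 1` would
# presumably be built as
#
#     Assign(Var("x"), Value(1), op="+")
#
# which str()-prints as an (assign+= ...) form and sexp()s to
# ("+=", name, value).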
class Tuple(Expr):
'''Tuple'''
def __init__(self, elems):
super().__init__()
assert(is_expr(*elems))
self.elems = elems
lv = rv = True
for e in elems:
lv = lv and e.lvalue
rv = rv and e.rvalue
e.make_expr()
self.lvalue = lv
self.rvalue = rv
def append(self, x):
self.elems.append(x)
def __str__(self):
return sexp((",", *self.elems))
def __repr__(self):
return f"Tuple({self.elems!r})"
def sexp(self):
return ("tuple", *self.elems)
class Call(Expr):
'''Call a function'''
def __init__(self, func, args):
super().__init__()
assert(is_expr(func))
assert(is_expr(*args))
self.func = func
self.args = args
func.make_expr()
for a in args:
a.make_expr()
def __str__(self):
return sexp(("call", self.func, *self.args))
def __repr__(self):
return f"Call({self.func!r}, {self.args!r})"
def sexp(self):
return ("call", self.func, *self.args)
class Index(Expr):
'''Index a value'''
def __init__(self, obj, indices):
super().__init__()
assert(is_expr(obj))
assert(is_expr(*indices))
self.obj = obj
self.indices = indices
obj.make_expr()
for i in indices:
i.make_expr()
def __str__(self):
return sexp((".", self.obj, [*self.indices]))
def __repr__(self):
return f"Index({self.obj!r}, {self.indices!r})"
def sexp(self):
return (".", self.obj, [*self.indices])
class After(Expr):
def __init__(self, value, update):
super().__init__()
assert(is_expr(value))
assert(is_expr(update))
self.value = value
self.update = update
value.make_expr()
update.make_expr()
def __str__(self):
return sexp(("after", self.value, self.update))
def __repr__(self):
return f"After({self.value!r}, {self.update!r})"
def sexp(self):
return ("after", self.value, self.update)
class Bind(Expr):
'''Binding operator ->'''
def __init__(self, obj, member):
super().__init__()
assert(is_expr(obj))
assert(is_expr(member))
self.obj = obj
self.member = member
obj.make_expr()
member.make_expr()
def __str__(self):
return sexp(self)
def __repr__(self):
return f"Bind({self.obj!r}, {self.member!r})"
def sexp(self):
return ("->", self.obj, self.member)
class Descope(Expr):
'''Descoping operator ::'''
def __init__(self, obj, member):
super().__init__()
assert(is_expr(obj))
assert(is_expr(member))
self.obj = obj
self.member = member
obj.make_expr()
member.make_expr()
def __str__(self):
return sexp(self)
def __repr__(self):
return f"Descope({self.obj!r}, {self.member!r})"
def sexp(self):
return ("::", self.obj, self.member)
class Loop(Statement):
'''All loop types simplify to this node, an infinite loop'''
def __init__(self, body, el=None):
super().__init__()
assert(is_expr(body))
assert(is_expr(el) or el is None)
self.body = body
self.el = el
def __str__(self):
return sexp(("loop", ..., self.body, self.el and ("else", self.el)))
def __repr__(self):
return f"Loop({self.body!r}, {self.el!r})"
def sexp(self):
return ("loop", ...,
self.body, self.el and ("else", self.el))
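# Sketch of how a conditional loop might lower to this node (an assumption
# about the parser, not taken from this file): a `while cond { body }` form
# could become an unconditional Loop whose body re-checks the condition and
# exits through Branch("break") (level 0 = innermost loop):
#
#     Loop(If(cond, body, Branch("break")))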
class If(Expr):
'''
if statements always act the same as an expression or statement, so
they're actually a kind of expression
'''
def __init__(self, cond, th, el):
super().__init__()
assert(is_expr(cond))
assert(is_expr(th) or th is None)
assert(is_expr(el) or el is None)
self.cond = cond
self.th = th
self.el = el
cond.make_expr()
# Then and else retain their statement value
def __str__(self):
return sexp(("if", self.cond,
...,
self.th and ("then", self.th),
self.el and ("else", self.el)
))
def __repr__(self):
return f"If({self.cond!r}, {self.th!r}, {self.el!r})"
def sexp(self):
return ("if", self.cond, ...,
self.th and ("then", self.th),
self.el and ("else", self.el))
class Branch(Statement):
'''Base class for branching in blocks'''
def __init__(self, kind, level=0):
super().__init__()
assert(type(kind) is str)
assert(type(level) is int)
self.kind = kind
self.level = level
def __str__(self):
return sexp((self.kind, self.level))
def __repr__(self):
return f"Branch({self.kind!r}, {self.level!r})"
def sexp(self):
return (self.kind, self.level)
class Op(Expr):
'''Simple operation, evaluates to a value'''
def __init__(self, op, *args):
super().__init__()
assert(type(op) is str)
assert(is_expr(*args))
self.op = op
self.args = args
self.lvalue = False # ops are always r-value
# All subexpressions are not statements
for a in args:
a.make_expr()
def __str__(self):
return sexp((self.op, *self.args))
def __repr__(self):
return f"Op({self.op!r}, {self.args!r}, {self.lvalue!r})"
def sexp(self):
return (self.op, *self.args)
class Import(Expr):
'''Import statement, for now just support builtin libraries'''
def __init__(self, name):
super().__init__()
assert(is_expr(name))
self.name = name
def __str__(self):
return sexp(("import", self.name))
def __repr__(self):
return f"Import({self.name!r})"
def sexp(self):
return ("import", self.name)
class Proto(Expr):
'''Proto expression'''
def __init__(self, name, parent, pub, priv, stat):
super().__init__()
assert(is_expr(name))
assert(is_expr(parent) or parent is None)
assert(is_expr(*pub))
assert(is_expr(*priv))
assert(is_expr(*stat))
self.name = name
self.parent = parent
self.pub = pub
self.priv = priv
self.stat = stat
def __str__(self):
return sexp(("proto",
self.name and f":{self.name}",
self.parent and ("is", self.parent),
...,
self.pub and ("public", self.pub),
self.priv and ("private", self.priv),
self.stat and ("static", self.stat)
))
def __repr__(self):
return f"Proto({self.name!r}, {self.parent!r}, {self.pub!r}, {self.priv!r}, {self.stat!r})"
def sexp(self):
return ("proto", self.name, self.parent and ("is", self.parent),
...,
self.pub and ("public", self.pub),
self.priv and ("private", self.priv),
self.stat and ("static", self.stat)
)
class Return(Statement):
'''Return statement'''
def __init__(self, value):
super().__init__()
assert(is_expr(value))
self.value = value
value.make_expr()
def __str__(self):
return sexp(("return", self.value))
def __repr__(self):
return f"Return({self.value!r})"
def sexp(self):
return ("return", self.value)
class Format(Expr):
'''Formatted string expression'''
def __init__(self, parts):
super().__init__()
assert(is_expr(*parts))
self.parts = parts
for p in parts:
p.make_expr()
def __str__(self):
return sexp(("format", ..., *(
repr(x) if type(x) is str else x for x in self.parts
)))
def __repr__(self):
return f"Format({self.parts!r})"
def sexp(self):
return ("format", ..., *(
repr(x) if type(x) is str else x for x in self.parts
))
class Case(Expr):
def __init__(self, op, value, body, next):
super().__init__()
assert(type(op) is str)
assert(is_expr(value))
assert(is_expr(body))
self.op = op
self.value = value
self.body = body
self.next = next
value.make_expr()
def __str__(self):
return sexp(("case", self.op, self.value,
self.body, self.next and "..."
))
def __repr__(self):
return f"Case({self.op!r}, {self.value!r}, {self.body!r}, {self.next!r})"
def sexp(self):
return ("case" + self.op, self.value,
self.body, self.next and "..."
)
class Switch(Expr):
'''
Switch expression.
This is implemented by separating the predicates from the values/bodies.
Predicates keep track of the comparison operation, value to compare
against, a body index, and a next index. Blocks
'''
def __init__(self, ex, cs, de, th, el):
super().__init__()
assert(is_expr(ex))
assert(is_expr(*cs))
assert(is_expr(de) or de is None)
assert(is_expr(th) or th is None)
assert(is_expr(el) or el is None)
self.ex = ex # EXpression
self.cs = cs # CaseS
self.de = de # DEfault
self.th = th # THen
self.el = el # ELse
ex.make_expr()
def __str__(self):
return sexp(("switch", self.ex,
...,
*self.cs,
self.de and ("default", self.de),
self.th and ("then", self.th),
self.el and ("else", self.el)
))
def __repr__(self):
return f"Switch({self.ex!r}, {self.cs!r}, {self.de!r}, {self.th!r}, {self.el!r})"
def sexp(self):
return ("switch", self.ex, ...,
*self.cs,
self.de and ("default", self.de),
self.th and ("then", self.th),
self.el and ("else", self.el)
)
class ObjectLiteral(Expr):
'''Object literal'''
def __init__(self, obj):
super().__init__()
#assert(??)
self.values = obj
for k, v in obj:
k.make_expr()
v.make_expr()
def __str__(self):
return sexp(("object", ...,
*(("pair", k, v) for k, v in self.values)
))
def __repr__(self):
return f"ObjectLiteral({self.values!r})"
def sexp(self):
return ("object", ...,
*(("pair", k, v) for k, v in self.values)
)
class ListLiteral(Expr):
'''List literal'''
def __init__(self, vals):
super().__init__()
assert(is_expr(*vals))
self.values = vals
for v in vals:
v.make_expr()
def __str__(self):
return sexp(("list", *self.values))
def __repr__(self):
return f"ListLiteral({self.values!r})"
def sexp(self):
return ("list", *self.values)
class ForLoop(Statement):
'''
Representing for loops with Loop ends up being too complicated
'''
def __init__(self, itvar, toiter, body, th, el):
super().__init__()
assert(is_expr(itvar))
assert(is_expr(toiter))
assert(is_expr(body))
assert(is_expr(th) or th is None)
assert(is_expr(el) or el is None)
self.itvar = itvar
self.toiter = toiter
self.body = body
self.th = th
self.el = el
toiter.make_expr()
def __str__(self):
return sexp(("for",
self.itvar,
("in", self.toiter),
...,
("body", self.body),
self.th and ("then", self.th),
self.el and ("else", self.el)
))
def __repr__(self):
return f"ForLoop({self.itvar!r}, {self.toiter!r}, {self.body!r}, {self.th!r}, {self.el!r})"
def sexp(self):
return ("for", self.itvar, ("in", self.toiter), ...,
("body", self.body),
self.th and ("then", self.th),
self.el and ("else", self.el)
)
class Block(Statement):
'''Sequence of expressions evaluating to the last'''
def __init__(self, elems, vars=None):
super().__init__()
vars = vars or []
assert(is_expr(*vars))
se = []
for e in elems:
if type(e) is Block:
se += e.elems
vars += e.vars
elif e is not None:
se.append(e)
self.elems = se
self.vars = vars
self.lvalue = False
def __str__(self):
#v = [x for x in self.vars if x.mutable]
#c = [x for x in self.vars if not x.mutable]
return sexp(("block",
self.vars,
...,
#c and tuple(["const", *c]),
*self.elems
))
def __repr__(self):
return f"Block({self.elems!r}, {self.vars!r})"
def sexp(self):
return ("block", self.vars, ...,
#c and tuple(["const", *c]),
*self.elems
)
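# Illustrative note (not from the original source): Block flattens nested
# blocks at construction time, so wrapping an existing Block in another Block
# splices its elements and variables instead of nesting them. Sketch, with
# hypothetical expressions x and y:
#     inner = Block([x])
#     outer = Block([inner, y])
#     outer.elems == [x, y]        # inner's elements are spliced in
#     outer.vars == inner.vars     # and its declared variables carried over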
class Prog(Block):
def __init__(self, elems, vars=None):
super().__init__(elems, vars)
def __repr__(self):
return f"Prog({self.elems!r}, {self.vars!r})"
class Func(Expr):
def __init__(self, name, args, body):
super().__init__()
assert(is_expr(name))
assert(is_expr(*args))
assert(is_expr(body))
self.name = name
self.args = args
self.body = body
def __str__(self):
return sexp(("function", self.name, self.args, ..., self.body))
def __repr__(self):
return f"Func({self.name!r}, {self.args!r}, {self.body!r}"
def sexp(self):
return ("function", self.name, self.args, ..., self.body)
|
|
"""
The tdb module provides support for reading and writing databases in
Thermo-Calc TDB format.
"""
from pyparsing import CaselessKeyword, CharsNotIn, Group
from pyparsing import LineEnd, MatchFirst, OneOrMore, Optional, Regex, SkipTo
from pyparsing import ZeroOrMore, Suppress, White, Word, alphanums, alphas, nums
from pyparsing import delimitedList, ParseException
import re
from sympy import sympify, And, Or, Not, Intersection, Union, EmptySet, Interval, Piecewise
from sympy import Symbol, GreaterThan, StrictGreaterThan, LessThan, StrictLessThan, Complement, S
from sympy import Mul, Pow, Rational
from sympy.abc import _clash
from sympy.printing.str import StrPrinter
from sympy.core.mul import _keep_coeff
from sympy.printing.precedence import precedence
from pycalphad import Database
from pycalphad.io.database import DatabaseExportError
from pycalphad.io.grammar import float_number, chemical_formula
from pycalphad.variables import Species
import pycalphad.variables as v
from pycalphad.io.tdb_keywords import expand_keyword, TDB_PARAM_TYPES
from collections import defaultdict, namedtuple
import ast
import sys
import inspect
import functools
import itertools
import getpass
import datetime
import warnings
import hashlib
from copy import deepcopy
# ast.Num is deprecated in Python 3.8 in favor of ast.Constant
# Both are whitelisted for compatibility across versions
_AST_WHITELIST = [ast.Add, ast.BinOp, ast.Call, ast.Constant, ast.Div,
ast.Expression, ast.Load, ast.Mult, ast.Name, ast.Num,
ast.Pow, ast.Sub, ast.UAdd, ast.UnaryOp, ast.USub]
# Avoid symbol names clashing with objects in sympy (gh-233)
clashing_namespace = {}
clashing_namespace.update(_clash)
clashing_namespace['CC'] = Symbol('CC')
clashing_namespace['FF'] = Symbol('FF')
clashing_namespace['T'] = v.T
clashing_namespace['P'] = v.P
clashing_namespace['R'] = v.R
def _sympify_string(math_string):
"Convert math string into SymPy object."
# drop pound symbols ('#') since they denote function names
# we detect those automatically
expr_string = math_string.replace('#', '')
# sympify doesn't recognize LN as ln()
expr_string = \
re.sub(r'(?<!\w)LN(?!\w)', 'ln', expr_string, flags=re.IGNORECASE)
expr_string = \
re.sub(r'(?<!\w)LOG(?!\w)', 'log', expr_string, flags=re.IGNORECASE)
expr_string = \
re.sub(r'(?<!\w)EXP(?!\w)', 'exp', expr_string,
flags=re.IGNORECASE)
# sympify uses eval, so we need to sanitize the input
nodes = ast.parse(expr_string)
nodes = ast.Expression(nodes.body[0].value)
for node in ast.walk(nodes):
if type(node) not in _AST_WHITELIST: #pylint: disable=W1504
raise ValueError('Expression from TDB file not in whitelist: '
'{}'.format(expr_string))
return sympify(expr_string, locals=clashing_namespace)
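# Hedged, illustrative usage of the helper above (not part of the original
# module): LN is rewritten to ln, the '#' function-reference marker is
# dropped, and the result is a SymPy expression in v.T and plain Symbols.
def _sympify_string_example():
    expr = _sympify_string("-24.3671976*T*LN(T)+GHSERAL#")
    # expected to contain v.T and Symbol('GHSERAL')
    return expr.free_symbols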
def _parse_action(func):
"""
Decorator for pyparsing parse actions to ease debugging.
pyparsing uses trial & error to deduce the number of arguments a parse
action accepts. Unfortunately any ``TypeError`` raised by a parse action
confuses that mechanism.
This decorator replaces the trial & error mechanism with one based on
reflection. If the decorated function itself raises a ``TypeError`` then
that exception is re-raised if the wrapper is called with less arguments
than required. This makes sure that the actual ``TypeError`` bubbles up
from the call to the parse action (instead of the one caused by pyparsing's
trial & error).
Modified slightly from the original for Py3 compatibility
Source: Florian Brucker on StackOverflow
http://stackoverflow.com/questions/10177276/pyparsing-setparseaction-function-is-getting-no-arguments
"""
func_items = inspect.signature(func).parameters.items()
func_args = [name for name, param in func_items
if param.kind == param.POSITIONAL_OR_KEYWORD]
num_args = len(func_args)
if num_args > 3:
raise ValueError('Input function must take at most 3 parameters.')
@functools.wraps(func)
def action(*args):
"Wrapped function."
if len(args) < num_args:
if action.exc_info:
raise action.exc_info[0](action.exc_info[1], action.exc_info[2])
action.exc_info = None
try:
return func(*args[:-(num_args + 1):-1])
except TypeError as err:
action.exc_info = sys.exc_info()
raise err
action.exc_info = None
return action
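# Illustrative note: a decorated parse action declares only the arguments it
# actually uses; pyparsing may still call the wrapper with the full
# (string, location, tokens) triple, and for a one-argument action such as
# _make_piecewise_ast below only the token list is forwarded.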
@_parse_action
def _make_piecewise_ast(toks):
"""
Convenience function for converting tokens into a piecewise sympy AST.
"""
cur_tok = 0
expr_cond_pairs = []
# Only one token: Not a piecewise function; just return the AST
if len(toks) == 1:
return _sympify_string(toks[0].strip(' ,'))
while cur_tok < len(toks)-1:
low_temp = toks[cur_tok]
try:
high_temp = toks[cur_tok+2]
except IndexError:
# No temperature limit specified
high_temp = None
if high_temp is None:
expr_cond_pairs.append(
(
_sympify_string(toks[cur_tok+1]),
And(low_temp <= v.T)
)
)
else:
expr_cond_pairs.append(
(
_sympify_string(toks[cur_tok+1]),
And(low_temp <= v.T, v.T < high_temp)
)
)
cur_tok = cur_tok + 2
expr_cond_pairs.append((0, True))
return Piecewise(*expr_cond_pairs, evaluate=False)
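# Hedged example of the expected token layout: for a TDB function body of the
# form "298.15 EXPR_A; 700.00 Y EXPR_B; 2900.00 N" (EXPR_A and EXPR_B are
# placeholder expression strings), the tokens alternate lower temperature
# limit / expression, so the loop above yields
# Piecewise((EXPR_A, 298.15 <= T < 700), (EXPR_B, 700 <= T < 2900), (0, True)).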
class TCCommand(CaselessKeyword): #pylint: disable=R0903
"""
Parser element for dealing with Thermo-Calc command abbreviations.
"""
def parseImpl(self, instring, loc, doActions=True):
# Find the end of the keyword by searching for an end character
start = loc
endchars = ' ():,'
loc = -1
for charx in endchars:
locx = instring.find(charx, start)
if locx != -1:
# match the end-character closest to the start character
if loc != -1:
loc = min(loc, locx)
else:
loc = locx
# if no end character found, just match the whole thing
if loc == -1:
loc = len(instring)
try:
res = expand_keyword([self.match], instring[start:loc])
if len(res) > 1:
self.errmsg = '{0!r} is ambiguous: matches {1}' \
.format(instring[start:loc], res)
raise ParseException(instring, loc, self.errmsg, self)
# res[0] is the unambiguous expanded keyword
# in principle, res[0] == self.match
return loc, res[0]
except ValueError:
pass
raise ParseException(instring, loc, self.errmsg, self)
def _tdb_grammar(): #pylint: disable=R0914
"""
Convenience function for getting the pyparsing grammar of a TDB file.
"""
int_number = Word(nums).setParseAction(lambda t: [int(t[0])])
# symbol name, e.g., phase name, function name
symbol_name = Word(alphanums+'_:', min=1)
ref_phase_name = symbol_name = Word(alphanums+'_-:()/', min=1)
# species name, e.g., CO2, AL, FE3+
species_name = Word(alphanums+'+-*/_.', min=1) + Optional(Suppress('%'))
reference_key = Word(alphanums+':_-')('reference_key')
# constituent arrays are colon-delimited
# each subarray can be comma- or space-delimited
constituent_array = Group(delimitedList(Group(OneOrMore(Optional(Suppress(',')) + species_name)), ':'))
param_types = MatchFirst([TCCommand(param_type) for param_type in TDB_PARAM_TYPES])
# Let sympy do heavy arithmetic / algebra parsing for us
# a convenience function will handle the piecewise details
func_expr = (float_number | ZeroOrMore(',').setParseAction(lambda t: 0.01)) + OneOrMore(SkipTo(';') \
+ Suppress(';') + ZeroOrMore(Suppress(',')) + Optional(float_number) + \
Suppress(Optional(Word('Yy', exact=1))), stopOn=Word('Nn', exact=1)) + Suppress(Optional(Word('Nn', exact=1)))
# ELEMENT
cmd_element = TCCommand('ELEMENT') + Word(alphas+'/-', min=1, max=2) + ref_phase_name + \
float_number + float_number + float_number + LineEnd()
# SPECIES
cmd_species = TCCommand('SPECIES') + species_name + chemical_formula + LineEnd()
# TYPE_DEFINITION
cmd_typedef = TCCommand('TYPE_DEFINITION') + \
Suppress(White()) + CharsNotIn(' !', exact=1) + SkipTo(LineEnd())
# FUNCTION
cmd_function = TCCommand('FUNCTION') + symbol_name + \
func_expr.setParseAction(_make_piecewise_ast) + \
Optional(Suppress(reference_key)) + LineEnd()
# ASSESSED_SYSTEMS
cmd_ass_sys = TCCommand('ASSESSED_SYSTEMS') + SkipTo(LineEnd())
# DEFINE_SYSTEM_DEFAULT
cmd_defsysdef = TCCommand('DEFINE_SYSTEM_DEFAULT') + SkipTo(LineEnd())
# DEFAULT_COMMAND
cmd_defcmd = TCCommand('DEFAULT_COMMAND') + SkipTo(LineEnd())
# DATABASE_INFO
cmd_database_info = TCCommand('DATABASE_INFO') + SkipTo(LineEnd())
# VERSION_DATE
cmd_version_date = TCCommand('VERSION_DATE') + SkipTo(LineEnd())
# REFERENCE_FILE
cmd_reference_file = TCCommand('REFERENCE_FILE') + SkipTo(LineEnd())
# ADD_REFERENCES
cmd_add_ref = TCCommand('ADD_REFERENCES') + SkipTo(LineEnd())
# LIST_OF_REFERENCES
cmd_lor = TCCommand('LIST_OF_REFERENCES') + SkipTo(LineEnd())
# TEMPERATURE_LIMITS
cmd_templim = TCCommand('TEMPERATURE_LIMITS') + SkipTo(LineEnd())
# PHASE
cmd_phase = TCCommand('PHASE') + symbol_name + \
Suppress(White()) + CharsNotIn(' !', min=1) + Suppress(White()) + \
Suppress(int_number) + Group(OneOrMore(float_number)) + \
Suppress(SkipTo(LineEnd()))
# CONSTITUENT
cmd_constituent = TCCommand('CONSTITUENT') + symbol_name + \
Suppress(White()) + Suppress(':') + constituent_array + \
Suppress(':') + LineEnd()
# PARAMETER
cmd_parameter = TCCommand('PARAMETER') + param_types + \
Suppress('(') + symbol_name + \
Optional(Suppress('&') + Word(alphas+'/-', min=1, max=2), default=None) + \
Suppress(',') + constituent_array + \
Optional(Suppress(';') + int_number, default=0) + \
Suppress(')') + func_expr.setParseAction(_make_piecewise_ast) + \
Optional(Suppress(reference_key)) + LineEnd()
# Now combine the grammar together
all_commands = cmd_element | \
cmd_species | \
cmd_typedef | \
cmd_function | \
cmd_ass_sys | \
cmd_defsysdef | \
cmd_defcmd | \
cmd_database_info | \
cmd_version_date | \
cmd_reference_file | \
cmd_add_ref | \
cmd_lor | \
cmd_templim | \
cmd_phase | \
cmd_constituent | \
cmd_parameter
return all_commands
def _process_typedef(targetdb, typechar, line):
"""
Process a TYPE_DEFINITION command.
Assumes all phases are entered into the database already and that the
database defines _typechar_map, which defines a map of typechar to the
phases that use it. Any phases that in the typechar dict for this will have
the model_hints updated based on this type definition, regardless of which
phase names may be defined in this TYPE_DEF line.
"""
matching_phases = targetdb._typechar_map[typechar]
del targetdb._typechar_map[typechar]
# GES A_P_D BCC_A2 MAGNETIC -1 0.4
tokens = line.replace(',', '').split()
if len(tokens) < 4:
return
keyword = expand_keyword(['DISORDERED_PART', 'MAGNETIC'], tokens[3].upper())[0]
if len(keyword) == 0:
raise ValueError('Unknown type definition keyword: {}'.format(tokens[3]))
if len(matching_phases) == 0:
warnings.warn(f"The type definition character `{typechar}` in `TYPE_DEFINITION {typechar} {line}` is not used by any phase.")
if keyword == 'MAGNETIC':
# Magnetic model, both IHJ and Xiong models use these model hints when
# constructing Model instances, despite being prefixed `ihj_magnetic_`
model_hints = {
'ihj_magnetic_afm_factor': float(tokens[4]),
'ihj_magnetic_structure_factor': float(tokens[5])
}
for phase_name in matching_phases:
targetdb.phases[phase_name].model_hints.update(model_hints)
# GES A_P_D L12_FCC DIS_PART FCC_A1
if keyword == 'DISORDERED_PART':
# order-disorder model: since we need to add model_hints to both the
# ordered and disordered phase, we special case to update the phase
# names defined by the TYPE_DEF, rather than updating the phases
# with matching typechars.
ordered_phase = tokens[2].upper()
disordered_phase = tokens[4].upper()
hint = {
'ordered_phase': ordered_phase,
'disordered_phase': disordered_phase,
}
if ordered_phase in targetdb.phases:
targetdb.phases[ordered_phase].model_hints.update(hint)
else:
raise ValueError(f"The {ordered_phase} phase is not in the database, but is defined by: `TYPE_DEFINTION {typechar} {line}`")
if disordered_phase in targetdb.phases:
targetdb.phases[disordered_phase].model_hints.update(hint)
else:
raise ValueError(f"The {disordered_phase} phase is not in the database, but is defined by: `TYPE_DEFINTION {typechar} {line}`")
phase_options = {'ionic_liquid_2SL': 'Y',
'symmetry_FCC_4SL': 'F',
'symmetry_BCC_4SL': 'B',
'liquid': 'L',
'gas': 'G',
'aqueous': 'A',
'charged_phase': 'I'}
inv_phase_options = dict([reversed(i) for i in phase_options.items()])
def _process_phase(targetdb, name, typedefs, subls):
"""
Process the PHASE command.
"""
splitname = name.split(':')
phase_name = splitname[0].upper()
options = ''
if len(splitname) > 1:
options = splitname[1]
targetdb.add_structure_entry(phase_name, phase_name)
model_hints = {}
for option in inv_phase_options.keys():
if option in options:
model_hints[inv_phase_options[option]] = True
for typedef_char in list(typedefs):
targetdb._typechar_map[typedef_char].append(phase_name)
# Model hints are updated later based on the type definitions
targetdb.add_phase(phase_name, model_hints, subls)
def _process_parameter(targetdb, param_type, phase_name, diffusing_species,
constituent_array, param_order, param, ref=None):
"""
Process the PARAMETER command.
"""
# sorting lx is _required_ here: see issue #17 on GitHub
targetdb.add_parameter(param_type, phase_name.upper(),
[[c.upper() for c in sorted(lx)]
for lx in constituent_array.asList()],
param_order, param, ref, diffusing_species, force_insert=False)
def _unimplemented(*args, **kwargs): #pylint: disable=W0613
"""
Null function.
"""
pass
def _process_species(db, sp_name, sp_comp, charge=0, *args):
"""Add a species to the Database. If charge not specified, the Species will be neutral."""
# process the species composition list of [element1, ratio1, element2, ratio2, ..., elementN, ratioN]
constituents = {sp_comp[i]: sp_comp[i+1] for i in range(0, len(sp_comp), 2)}
db.species.add(Species(sp_name, constituents, charge=charge))
def _process_reference_state(db, el, refphase, mass, H298, S298):
db.refstates[el] = {
'phase': refphase,
'mass': mass,
'H298': H298,
'S298': S298,
}
def _setitem_raise_duplicates(dictionary, key, value):
if key in dictionary:
raise ValueError("TDB contains duplicate FUNCTION {}".format(key))
dictionary[key] = value
_TDB_PROCESSOR = {
'ELEMENT': lambda db, el, ref_phase, mass, h, s: (db.elements.add(el), _process_reference_state(db, el, ref_phase, mass, h, s), _process_species(db, el, [el, 1], 0)),
'SPECIES': _process_species,
'TYPE_DEFINITION': lambda db, typechar, line: db._typedefs_queue.append((typechar, line)),
'FUNCTION': lambda db, name, sym: _setitem_raise_duplicates(db.symbols, name, sym),
'DEFINE_SYSTEM_DEFAULT': _unimplemented,
'ASSESSED_SYSTEMS': _unimplemented,
'DEFAULT_COMMAND': _unimplemented,
'DATABASE_INFO': _unimplemented,
'VERSION_DATE': _unimplemented,
'REFERENCE_FILE': _unimplemented,
'ADD_REFERENCES': _unimplemented,
'LIST_OF_REFERENCES': _unimplemented,
'TEMPERATURE_LIMITS': _unimplemented,
'PHASE': _process_phase,
'CONSTITUENT': \
lambda db, name, c: db.add_phase_constituents(
name.split(':')[0].upper(), c),
'PARAMETER': _process_parameter
}
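# Hedged sketch of the dispatch performed by read_tdb() below: each command is
# routed through this table by its first parsed token, roughly
#     tokens = _tdb_grammar().parseString("ELEMENT AL FCC_A1 26.98 4577.3 28.32")
#     _TDB_PROCESSOR[tokens[0]](dbf, *tokens[1:])
# (the element data shown here are illustrative only).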
def to_interval(relational):
if isinstance(relational, And):
return Intersection(*[to_interval(i) for i in relational.args])
elif isinstance(relational, Or):
return Union(*[to_interval(i) for i in relational.args])
elif isinstance(relational, Not):
return Complement(*[to_interval(i) for i in relational.args])
if relational == S.true:
return Interval(S.NegativeInfinity, S.Infinity, left_open=True, right_open=True)
if len(relational.free_symbols) != 1:
raise ValueError('Relational must only have one free symbol')
if len(relational.args) != 2:
raise ValueError('Relational must only have two arguments')
free_symbol = list(relational.free_symbols)[0]
lhs = relational.args[0]
rhs = relational.args[1]
if isinstance(relational, GreaterThan):
if lhs == free_symbol:
return Interval(rhs, S.Infinity, left_open=False)
else:
return Interval(S.NegativeInfinity, rhs, right_open=False)
elif isinstance(relational, StrictGreaterThan):
if lhs == free_symbol:
return Interval(rhs, S.Infinity, left_open=True)
else:
return Interval(S.NegativeInfinity, rhs, right_open=True)
elif isinstance(relational, LessThan):
if lhs != free_symbol:
return Interval(rhs, S.Infinity, left_open=False)
else:
return Interval(S.NegativeInfinity, rhs, right_open=False)
elif isinstance(relational, StrictLessThan):
if lhs != free_symbol:
return Interval(rhs, S.Infinity, left_open=True)
else:
return Interval(S.NegativeInfinity, rhs, right_open=True)
else:
raise ValueError('Unsupported Relational: {}'.format(relational.__class__.__name__))
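# Hedged, illustrative check of the conversion above (not part of the original
# module): conjunctions of one-sided temperature bounds collapse to finite
# intervals, which _print_Piecewise below relies on to order its branches.
def _to_interval_example():
    one_sided = to_interval(v.T >= 300)                # Interval(300, oo)
    bounded = to_interval(And(300 <= v.T, v.T < 600))  # Interval.Ropen(300, 600)
    return one_sided, bounded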
class TCPrinter(StrPrinter):
"""
Prints Thermo-Calc style function expressions.
"""
def _print_Piecewise(self, expr):
# Filter out default zeros since they are implicit in a TDB
filtered_args = [i for i in expr.args if not ((i.cond == S.true) and (i.expr == S.Zero))]
exprs = [self._print(arg.expr) for arg in filtered_args]
# Only a small subset of piecewise functions can be represented
# Need to verify that each cond's highlim equals the next cond's lowlim
# to_interval() is used instead of sympy.Relational.as_set() for performance reasons
intervals = [to_interval(i.cond) for i in filtered_args]
if (len(intervals) > 1) and Intersection(*intervals) is not EmptySet:
raise ValueError('Overlapping intervals cannot be represented: {}'.format(intervals))
if not isinstance(Union(*intervals), Interval):
raise ValueError('Piecewise intervals must be continuous')
if not all([arg.cond.free_symbols == {v.T} for arg in filtered_args]):
raise ValueError('Only temperature-dependent piecewise conditions are supported')
# Sort expressions based on intervals
sortindices = [i[0] for i in sorted(enumerate(intervals), key=lambda x:x[1].start)]
exprs = [exprs[idx] for idx in sortindices]
intervals = [intervals[idx] for idx in sortindices]
if len(exprs) > 1:
result = '{1} {0}; {2} Y'.format(exprs[0], self._print(intervals[0].start),
self._print(intervals[0].end))
result += 'Y'.join([' {0}; {1} '.format(expr,
self._print(i.end)) for i, expr in zip(intervals[1:], exprs[1:])])
result += 'N'
else:
result = '{0} {1}; {2} N'.format(self._print(intervals[0].start), exprs[0],
self._print(intervals[0].end))
return result
def _print_Mul(self, expr):
"Copied from sympy StrPrinter and modified to remove division."
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
# Thermo-Calc's parser can't handle division operators
return sign + '*'.join(a_str) + "*%s" % self.parenthesize(b[0]**(-1), prec)
else:
# TODO: Make this Thermo-Calc compatible by removing division operation
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_Pow(self, expr, rational=False):
"Copied from sympy StrPrinter to remove TC-incompatible Pow simplifications."
PREC = precedence(expr)
e = self.parenthesize(expr.exp, PREC)
if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1:
# the parenthesized exp should be '(Rational(a, b))' so strip parens,
# but just check to be sure.
if e.startswith('(Rational'):
return '%s**%s' % (self.parenthesize(expr.base, PREC), e[1:-1])
return '%s**%s' % (self.parenthesize(expr.base, PREC), e)
def _print_Infinity(self, expr):
# Use "default value" though TC's Database Checker complains about this
return ","
def _print_Symbol(self, expr):
if isinstance(expr, v.StateVariable):
return expr.name
else:
# Thermo-Calc likes symbol references to be marked with a '#' at the end
return expr.name + "#"
def _print_Function(self, expr):
func_translations = {'log': 'ln', 'exp': 'exp'}
if expr.func.__name__.lower() in func_translations:
return func_translations[expr.func.__name__.lower()] + "(%s)" % self.stringify(expr.args, ", ")
else:
raise TypeError("Unable to represent function: %s" %
expr.func.__name__)
def blacklisted(self, expr):
raise TypeError("Unable to represent expression: %s" %
expr.__class__.__name__)
# blacklist all Matrix printing
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
blacklisted
# blacklist other operations
_print_Derivative = \
_print_Integral = \
blacklisted
# blacklist some logical operations
# These should never show up outside a piecewise function
# Piecewise handles them directly
_print_And = \
_print_Or = \
_print_Not = \
blacklisted
# blacklist some python expressions
_print_list = \
_print_tuple = \
_print_Tuple = \
_print_dict = \
_print_Dict = \
blacklisted
def reflow_text(text, linewidth=80):
"""
Add line breaks to ensure text doesn't exceed a certain line width.
Parameters
----------
text : str
linewidth : int, optional
Returns
-------
reflowed_text : str
"""
""
lines = text.split("\n")
linebreak_chars = [" ", "$"]
output_lines = []
for line in lines:
if len(line) <= linewidth:
output_lines.append(line)
else:
while len(line) > linewidth:
linebreak_idx = linewidth-1
while line[linebreak_idx] not in linebreak_chars:
linebreak_idx -= 1
output_lines.append(line[:linebreak_idx])
if "$" in line:
# previous line was a comment
line = "$ " + line[linebreak_idx:]
else:
# Always put some leading spaces at the start of a new line
# Otherwise TC may misunderstand the expression
line = " " + line[linebreak_idx:]
output_lines.append(line)
return "\n".join(output_lines)
def _apply_new_symbol_names(dbf, symbol_name_map):
"""
Push changes in symbol names through the Sympy expressions in symbols and parameters
Parameters
----------
dbf : Database
A pycalphad Database.
symbol_name_map : dict
Map of {old_symbol_name: new_symbol_name}
"""
# first apply the rename to the keys
dbf.symbols = {symbol_name_map.get(name, name): expr for name, expr in dbf.symbols.items()}
# then propagate through to the symbol SymPy expression values
dbf.symbols = {name: S(expr).xreplace({Symbol(s): Symbol(v) for s, v in symbol_name_map.items()}) for name, expr in dbf.symbols.items()}
# finally propagate through to the parameters
for p in dbf._parameters.all():
dbf._parameters.update({'parameter': S(p['parameter']).xreplace({Symbol(s): Symbol(v) for s, v in symbol_name_map.items()})}, doc_ids=[p.doc_id])
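# Hedged usage sketch (mirrors the 'fix' path in write_tdb below), with
# illustrative symbol names:
#     _apply_new_symbol_names(dbf, {"GHSERAL_EXTENDED": "F1A2B3C4"})
# renames the key in dbf.symbols and rewrites every Symbol('GHSERAL_EXTENDED')
# appearing in symbol expressions or stored parameters.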
def write_tdb(dbf, fd, groupby='subsystem', if_incompatible='warn'):
"""
Write a TDB file from a pycalphad Database object.
The goal is to produce TDBs that conform to the most restrictive subset of database specifications. Some of these
can be adjusted for automatically, such as the Thermo-Calc line length limit of 78. Others require changing the
database in non-trivial ways, such as the maximum length of function names (8). The default is to warn the user when
attempting to write an incompatible database and the user must choose whether to warn and write the file anyway or
to fix the incompatibility.
Currently the supported compatibility fixes are:
- Line length <= 78 characters (Thermo-Calc)
- Function names <= 8 characters (Thermo-Calc)
The current unsupported fixes include:
- Keyword length <= 2000 characters (Thermo-Calc)
- Element names <= 2 characters (Thermo-Calc)
- Phase names <= 24 characters (Thermo-Calc)
Other TDB compatibility issues required by Thermo-Calc or other software should be reported to the issue tracker.
Parameters
----------
dbf : Database
A pycalphad Database.
fd : file-like
File descriptor.
groupby : ['subsystem', 'phase'], optional
Desired grouping of parameters in the file.
if_incompatible : string, optional ['raise', 'warn', 'ignore', 'fix']
Strategy if the database does not conform to the most restrictive database specification.
The 'warn' option (default) will write out the incompatible database with a warning.
The 'raise' option will raise a DatabaseExportError.
The 'ignore' option will write out the incompatible database silently.
The 'fix' option will rectify the incompatibilities e.g. through name mangling.
"""
# Before writing anything, check that the TDB is valid and take the appropriate action if not
if if_incompatible not in ['warn', 'raise', 'ignore', 'fix']:
raise ValueError('Incorrect options passed to \'if_incompatible\'. Valid args are \'raise\', \'warn\', \'ignore\', or \'fix\'.')
# Handle function names > 8 characters
long_function_names = {k for k in dbf.symbols.keys() if len(k) > 8}
if len(long_function_names) > 0:
if if_incompatible == 'raise':
raise DatabaseExportError('The following function names are beyond the 8 character TDB limit: {}. Use the keyword argument \'if_incompatible\' to control this behavior.'.format(long_function_names))
elif if_incompatible == 'fix':
# if we are going to make changes, make the changes to a copy and leave the original object untouched
dbf = deepcopy(dbf) # TODO: if we do multiple fixes, we should only copy once
symbol_name_map = {}
for name in long_function_names:
hashed_name = 'F' + str(hashlib.md5(name.encode('UTF-8')).hexdigest()).upper()[:7] # this is implicitly upper(), but it is explicit here
symbol_name_map[name] = hashed_name
_apply_new_symbol_names(dbf, symbol_name_map)
elif if_incompatible == 'warn':
warnings.warn('Ignoring that the following function names are beyond the 8 character TDB limit: {}. Use the keyword argument \'if_incompatible\' to control this behavior.'.format(long_function_names))
# Begin constructing the written database
writetime = datetime.datetime.now()
maxlen = 78
output = ""
# Comment header block
# Import here to prevent circular imports
from pycalphad import __version__
try:
# getuser() will raise on Windows if it can't find a username: https://bugs.python.org/issue32731
username = getpass.getuser()
except:
# if we can't find a good username, just choose a default and move on
username = 'user'
output += ("$" * maxlen) + "\n"
output += "$ Date: {}\n".format(writetime.strftime("%Y-%m-%d %H:%M"))
output += "$ Components: {}\n".format(', '.join(sorted(dbf.elements)))
output += "$ Phases: {}\n".format(', '.join(sorted(dbf.phases.keys())))
output += "$ Generated by {} (pycalphad {})\n".format(username, __version__)
output += ("$" * maxlen) + "\n\n"
for element in sorted(dbf.elements):
ref = dbf.refstates.get(element, {})
refphase = ref.get('phase', 'BLANK')
mass = ref.get('mass', 0.0)
H298 = ref.get('H298', 0.0)
S298 = ref.get('S298', 0.0)
output += "ELEMENT {0} {1} {2} {3} {4} !\n".format(element.upper(), refphase, mass, H298, S298)
if len(dbf.elements) > 0:
output += "\n"
for species in sorted(dbf.species, key=lambda s: s.name):
if species.name not in dbf.elements:
# construct the charge part of the specie
if species.charge != 0:
if species.charge > 0:
charge_sign = '+'
else:
charge_sign = ''
charge = '/{}{}'.format(charge_sign, species.charge)
else:
charge = ''
species_constituents = ''.join(['{}{}'.format(el, val) for el, val in sorted(species.constituents.items(), key=lambda t: t[0])])
output += "SPECIES {0} {1}{2} !\n".format(species.name.upper(), species_constituents, charge)
if len(dbf.species) > 0:
output += "\n"
# Write FUNCTION block
for name, expr in sorted(dbf.symbols.items()):
if not isinstance(expr, Piecewise):
# Non-piecewise exprs need to be wrapped to print
# Otherwise TC's TDB parser will complain
expr = Piecewise((expr, And(v.T >= 1, v.T < 10000)))
expr = TCPrinter().doprint(expr).upper()
if ';' not in expr:
expr += '; N'
output += "FUNCTION {0} {1} !\n".format(name.upper(), expr)
output += "\n"
# Boilerplate code
output += "TYPE_DEFINITION % SEQ * !\n"
output += "DEFINE_SYSTEM_DEFAULT ELEMENT 2 !\n"
default_elements = [i.upper() for i in sorted(dbf.elements) if i.upper() == 'VA' or i.upper() == '/-']
if len(default_elements) > 0:
output += 'DEFAULT_COMMAND DEFINE_SYSTEM_ELEMENT {} !\n'.format(' '.join(default_elements))
output += "\n"
typedef_chars = list("^&*()'ABCDEFGHIJKLMNOPQSRTUVWXYZ")[::-1]
# Write necessary TYPE_DEF based on model hints
typedefs = defaultdict(lambda: ["%"])
for name, phase_obj in sorted(dbf.phases.items()):
model_hints = phase_obj.model_hints.copy()
possible_options = set(phase_options.keys()).intersection(model_hints)
# Phase options are handled later
for option in possible_options:
del model_hints[option]
if ('ordered_phase' in model_hints.keys()) and (model_hints['ordered_phase'] == name):
new_char = typedef_chars.pop()
typedefs[name].append(new_char)
typedefs[model_hints['disordered_phase']].append(new_char)
output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} DISORDERED_PART {} !\n'\
.format(new_char, model_hints['ordered_phase'].upper(),
model_hints['disordered_phase'].upper())
del model_hints['ordered_phase']
del model_hints['disordered_phase']
if ('disordered_phase' in model_hints.keys()) and (model_hints['disordered_phase'] == name):
# We handle adding the correct typedef when we write the ordered phase
del model_hints['ordered_phase']
del model_hints['disordered_phase']
if 'ihj_magnetic_afm_factor' in model_hints.keys():
new_char = typedef_chars.pop()
typedefs[name].append(new_char)
output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} MAGNETIC {} {} !\n'\
.format(new_char, name.upper(), model_hints['ihj_magnetic_afm_factor'],
model_hints['ihj_magnetic_structure_factor'])
del model_hints['ihj_magnetic_afm_factor']
del model_hints['ihj_magnetic_structure_factor']
if len(model_hints) > 0:
# Some model hints were not properly consumed
raise ValueError('Not all model hints are supported: {}'.format(model_hints))
# Perform a second loop now that all typedefs / model hints are consistent
for name, phase_obj in sorted(dbf.phases.items()):
# model_hints may also contain "phase options", e.g., ionic liquid
model_hints = phase_obj.model_hints.copy()
name_with_options = str(name.upper())
possible_options = set(phase_options.keys()).intersection(model_hints.keys())
if len(possible_options) > 0:
name_with_options += ':'
for option in possible_options:
name_with_options += phase_options[option]
output += "PHASE {0} {1} {2} {3} !\n".format(name_with_options, ''.join(typedefs[name]),
len(phase_obj.sublattices),
' '.join([str(i) for i in phase_obj.sublattices]))
constituents = ':'.join([','.join([spec.name for spec in sorted(subl)]) for subl in phase_obj.constituents])
output += "CONSTITUENT {0} :{1}: !\n".format(name_with_options, constituents)
output += "\n"
# PARAMETERs by subsystem
param_sorted = defaultdict(lambda: list())
paramtuple = namedtuple('ParamTuple', ['phase_name', 'parameter_type', 'complexity', 'constituent_array',
'parameter_order', 'diffusing_species', 'parameter', 'reference'])
for param in dbf._parameters.all():
if groupby == 'subsystem':
components = set()
for subl in param['constituent_array']:
components |= set(subl)
if param['diffusing_species'] != Species(None):
components |= {param['diffusing_species']}
# Wildcard operator is not a component
components -= {'*'}
desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
components = set([el.upper() for constituents in desired_active_pure_elements for el in constituents])
# Remove vacancy if it's not the only component (pure vacancy endmember)
if len(components) > 1:
components -= {'VA'}
components = tuple(sorted([c.upper() for c in components]))
grouping = components
elif groupby == 'phase':
grouping = param['phase_name'].upper()
else:
raise ValueError('Unknown groupby attribute \'{}\''.format(groupby))
# We use the complexity parameter to help with sorting the parameters logically
param_sorted[grouping].append(paramtuple(param['phase_name'], param['parameter_type'],
sum([len(i) for i in param['constituent_array']]),
param['constituent_array'], param['parameter_order'],
param['diffusing_species'], param['parameter'],
param['reference']))
def write_parameter(param_to_write):
constituents = ':'.join([','.join(sorted([i.name.upper() for i in subl]))
for subl in param_to_write.constituent_array])
# TODO: Handle references
paramx = param_to_write.parameter
if not isinstance(paramx, Piecewise):
# Non-piecewise parameters need to be wrapped to print correctly
# Otherwise TC's TDB parser will fail
paramx = Piecewise((paramx, And(v.T >= 1, v.T < 10000)))
exprx = TCPrinter().doprint(paramx).upper()
if ';' not in exprx:
exprx += '; N'
if param_to_write.diffusing_species != Species(None):
ds = "&" + param_to_write.diffusing_species.name
else:
ds = ""
return "PARAMETER {}({}{},{};{}) {} !\n".format(param_to_write.parameter_type.upper(),
param_to_write.phase_name.upper(),
ds,
constituents,
param_to_write.parameter_order,
exprx)
if groupby == 'subsystem':
for num_species in range(1, 5):
subsystems = list(itertools.combinations(sorted([i.name.upper() for i in dbf.species]), num_species))
for subsystem in subsystems:
parameters = sorted(param_sorted[subsystem])
if len(parameters) > 0:
output += "\n\n"
output += "$" * maxlen + "\n"
output += "$ {}".format('-'.join(sorted(subsystem)).center(maxlen, " ")[2:-1]) + "$\n"
output += "$" * maxlen + "\n"
output += "\n"
for parameter in parameters:
output += write_parameter(parameter)
# Don't generate combinatorics for multi-component subsystems or we'll run out of memory
if len(dbf.species) > 4:
subsystems = [k for k in param_sorted.keys() if len(k) > 4]
for subsystem in subsystems:
parameters = sorted(param_sorted[subsystem])
for parameter in parameters:
output += write_parameter(parameter)
elif groupby == 'phase':
for phase_name in sorted(dbf.phases.keys()):
parameters = sorted(param_sorted[phase_name])
if len(parameters) > 0:
output += "\n\n"
output += "$" * maxlen + "\n"
output += "$ {}".format(phase_name.upper().center(maxlen, " ")[2:-1]) + "$\n"
output += "$" * maxlen + "\n"
output += "\n"
for parameter in parameters:
output += write_parameter(parameter)
else:
raise ValueError('Unknown groupby attribute {}'.format(groupby))
# Reflow text to respect character limit per line
fd.write(reflow_text(output, linewidth=maxlen))
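# Hedged usage sketch for the writer above (the filename is illustrative):
# if_incompatible='fix' mangles over-long FUNCTION names via the md5-based
# scheme instead of merely warning about them.
def _write_tdb_example(dbf):
    with open("output.tdb", "w") as fd:
        write_tdb(dbf, fd, groupby="phase", if_incompatible="fix")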
def read_tdb(dbf, fd):
"""
Parse a TDB file into a pycalphad Database object.
Parameters
----------
dbf : Database
A pycalphad Database.
fd : file-like
File descriptor.
"""
lines = fd.read().upper()
lines = lines.replace('\t', ' ')
lines = lines.strip()
# Split the string by newlines
splitlines = lines.split('\n')
# Remove extra whitespace inside line
splitlines = [' '.join(k.split()) for k in splitlines]
# Remove comments
splitlines = [k.strip().split('$', 1)[0] for k in splitlines]
# Remove everything after command delimiter, but keep the delimiter so we can split later
splitlines = [k.split('!')[0] + ('!' if len(k.split('!')) > 1 else '') for k in splitlines]
# Combine everything back together
lines = ' '.join(splitlines)
# Now split by the command delimiter
commands = lines.split('!')
# Temporarily track which typedef characters were used by which phase
# before we process the type definitions
# Map {typedef character: [phases using that typedef]}
dbf._typechar_map = defaultdict(list)
dbf._typedefs_queue = [] # queue of type definition lines to process
grammar = _tdb_grammar()
for command in commands:
if len(command) == 0:
continue
tokens = None
try:
tokens = grammar.parseString(command)
_TDB_PROCESSOR[tokens[0]](dbf, *tokens[1:])
except:
print("Failed while parsing: " + command)
print("Tokens: " + str(tokens))
raise
# Process type definitions last, updating model_hints for defined phases.
for typechar, line in dbf._typedefs_queue:
_process_typedef(dbf, typechar, line)
# Raise warnings for any remaining type definition characters that one or
# more phases referenced but that were never defined by a TYPE_DEFINITION line
for typechar, phases_expecting_typechar in dbf._typechar_map.items():
warnings.warn(f"The type definition character `{typechar}` was defined in the following phases: "
f"{phases_expecting_typechar}, but no corresponding TYPE_DEFINITION line was found in the TDB.")
del dbf._typechar_map
del dbf._typedefs_queue
dbf.process_parameter_queue()
Database.register_format("tdb", read=read_tdb, write=write_tdb)
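# With the registration above, this module is used implicitly whenever a TDB
# path is given to the Database constructor; a hedged sketch:
#     dbf = Database("my_database.tdb")   # parses via read_tdb
#     dbf.to_file("roundtrip.tdb")        # writes via write_tdb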
|
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer models."""
from flax import nn
import jax.numpy as jnp
from lra_benchmarks.models.layers import common_layers
from lra_benchmarks.models.linformer import linformer_attention
class LinformerBlock(nn.Module):
"""Linformer layer (https://arxiv.org/abs/2006.04768)."""
def apply(self,
inputs,
qkv_dim,
mlp_dim,
num_heads,
dtype=jnp.float32,
inputs_segmentation=None,
causal_mask=False,
padding_mask=None,
dropout_rate=0.1,
attention_dropout_rate=0.1,
deterministic=False,
max_len=512,
cache=None):
"""Applies LinformerBlock module.
Args:
inputs: input data
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
num_heads: number of heads
dtype: the dtype of the computation (default: float32).
inputs_segmentation: input segmentation info for packed examples.
causal_mask: bool, mask future or not
padding_mask: bool, mask padding tokens
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
deterministic: bool, deterministic or not (to apply dropout)
max_len: int, max sequence length.
cache: flax autoregressive cache for fast decoding.
Returns:
output after transformer block.
"""
# Attention block.
assert inputs.ndim == 3
x = nn.LayerNorm(inputs)
x = linformer_attention.LinformerSelfAttention(
x,
num_heads=num_heads,
dtype=dtype,
qkv_features=qkv_dim,
attention_axis=(1,),
causal_mask=causal_mask,
segmentation=inputs_segmentation,
padding_mask=padding_mask,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=deterministic,
max_len=max_len,
cache=cache)
x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(x)
y = common_layers.MlpBlock(
y,
mlp_dim=mlp_dim,
dtype=dtype,
dropout_rate=dropout_rate,
deterministic=deterministic)
return x + y
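# Illustrative sketch (not part of the original model code): ignoring the
# Linformer-specific attention internals and dropout, the block above is a
# standard pre-LayerNorm residual transformer block. `attn_fn`, `mlp_fn` and
# `layer_norm` are hypothetical stand-ins for the Flax modules used above,
# shown only to make the residual wiring explicit.
def _pre_ln_block_sketch(inputs, attn_fn, mlp_fn, layer_norm):
    x = layer_norm(inputs)
    x = attn_fn(x) + inputs  # attention sub-block plus residual connection
    y = layer_norm(x)
    return x + mlp_fn(y)     # feed-forward sub-block plus residual connection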
class LinformerEncoder(nn.Module):
"""Linformer Model Encoder."""
def apply(self,
inputs,
vocab_size,
inputs_positions=None,
inputs_segmentation=None,
shared_embedding=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
dtype=jnp.float32,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=512,
train=True,
dropout_rate=0.1,
attention_dropout_rate=0.1,
learn_pos_emb=False,
classifier=False,
classifier_pool='CLS',
num_classes=10,
tied_weights=False):
"""Applies Transformer model on the inputs.
Args:
inputs: input data
vocab_size: size of the vocabulary
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
shared_embedding: a shared embedding layer to use.
use_bfloat16: bool: whether use bfloat16.
emb_dim: dimension of embedding
num_heads: number of heads
dtype: the dtype of the computation (default: float32)
num_layers: number of layers
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
max_len: maximum length.
train: whether it is training.
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
learn_pos_emb: boolean, if learn the positional embedding or use the
sinusoidal positional embedding.
classifier: boolean, for classification mode (output N-class logits)
classifier_pool: str, supports "MEAN", "MAX" pooling.
num_classes: int, number of classification classes.
tied_weights: bool
Returns:
output of a transformer encoder or logits if classifier_mode is true.
"""
assert inputs.ndim == 2 # (batch, len)
# Padding Masks
src_padding_mask = (inputs > 0)[..., None]
# Input Embedding
if shared_embedding is None:
input_embed = nn.Embed.partial(
num_embeddings=vocab_size,
features=emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
if classifier and classifier_pool == 'CLS':
cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros)
cls = jnp.tile(cls, [x.shape[0], 1, 1])
x = jnp.concatenate([cls, x], axis=1)
max_len += 1
src_padding_mask = jnp.concatenate(
[src_padding_mask[:, :1], src_padding_mask], axis=1)
pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None
x = common_layers.AddPositionEmbs(
x,
inputs_positions=inputs_positions,
posemb_init=pe_init,
max_len=max_len,
name='posembed_input')
x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
if use_bfloat16:
x = x.astype(jnp.bfloat16)
dtype = jnp.bfloat16
else:
dtype = jnp.float32
# Input Encoder
if tied_weights:
encoder = LinformerBlock.shared(
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
dtype=dtype,
padding_mask=src_padding_mask,
inputs_segmentation=inputs_segmentation,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
max_len=max_len,
deterministic=not train,
name='encoderblock')
for lyr in range(num_layers):
x = encoder(x)
else:
for lyr in range(num_layers):
x = LinformerBlock(
x,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
dtype=dtype,
padding_mask=src_padding_mask,
inputs_segmentation=inputs_segmentation,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
max_len=max_len,
deterministic=not train,
name=f'encoderblock_{lyr}')
encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm')
if classifier:
encoded = common_layers.classifier_head(
encoded, num_classes, mlp_dim, pooling_mode=classifier_pool)
return encoded
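# Shape summary (hedged, derived from the code above): `inputs` is an int32
# array of shape (batch, length). With classifier=False the encoder returns
# activations of shape (batch, length, emb_dim), or (batch, length + 1, emb_dim)
# when a CLS token is prepended; with classifier=True it returns logits of
# shape (batch, num_classes) via common_layers.classifier_head.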
class LinformerDualEncoder(nn.Module):
"""Linformer Model for Matching (dual encoding) tasks."""
def apply(self,
inputs1,
inputs2,
vocab_size=None,
inputs1_positions=None,
inputs2_positions=None,
inputs1_segmentation=None,
inputs2_segmentation=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=2048,
train=False,
dropout_rate=0.1,
attention_dropout_rate=0.1,
classifier=True,
classifier_pool='CLS',
num_classes=2,
interaction=None,
tied_weights=False):
"""Applies Transformer model on text similarity.
A deliberate choice to distinguish this from NLI because
we may want to do different things to the model later. Dual Encoding
mode enforces that we do not do cross attention between pairs.
Args:
inputs1: input data.
inputs2: target data.
vocab_size: size of the input vocabulary.
inputs1_positions: input subsequence positions for packed examples.
inputs2_positions: target subsequence positions for packed examples.
inputs1_segmentation: input segmentation info for packed examples.
inputs2_segmentation: target segmentation info for packed examples.
use_bfloat16: bool: whether use bfloat16.
emb_dim: dimension of embedding.
num_heads: number of heads.
num_layers: number of layers.
qkv_dim: dimension of the query/key/value.
mlp_dim: dimension of the mlp on top of attention block.
max_len: maximum length.
train: whether it is training.
dropout_rate: dropout rate.
attention_dropout_rate: dropout rate for attention weights.
classifier: boolean, to use classifier.
classifier_pool: str, supports "MEAN", "MAX" pooling.
num_classes: int, number of classification classes.
interaction: str
tied_weights: boolean
Returns:
output of a transformer decoder.
"""
encoder = LinformerEncoder.shared(
inputs_positions=inputs1_positions,
inputs_segmentation=inputs1_segmentation,
vocab_size=vocab_size,
use_bfloat16=use_bfloat16,
emb_dim=emb_dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
max_len=max_len,
train=train,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
name='encoder',
tied_weights=tied_weights)
inputs1_encoded = encoder(inputs1)
inputs2_encoded = encoder(inputs2)
encoded = common_layers.classifier_head_dual(
inputs1_encoded,
inputs2_encoded,
num_classes,
mlp_dim,
pooling_mode=classifier_pool,
interaction=interaction)
return encoded
|
|
# orm/interfaces.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals.
This module and the classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
import collections
from . import exc as orm_exc
from . import path_registry
from .base import _MappedAttribute # noqa
from .base import EXT_CONTINUE
from .base import EXT_SKIP
from .base import EXT_STOP
from .base import InspectionAttr # noqa
from .base import InspectionAttrInfo # noqa
from .base import MANYTOMANY
from .base import MANYTOONE
from .base import NOT_EXTENSION
from .base import ONETOMANY
from .. import inspect
from .. import inspection
from .. import util
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql.base import ExecutableOption
from ..sql.traversals import HasCacheKey
__all__ = (
"EXT_CONTINUE",
"EXT_STOP",
"EXT_SKIP",
"ONETOMANY",
"MANYTOMANY",
"MANYTOONE",
"NOT_EXTENSION",
"LoaderStrategy",
"MapperOption",
"LoaderOption",
"MapperProperty",
"PropComparator",
"StrategizedProperty",
)
class ORMStatementRole(roles.StatementRole):
_role_name = (
"Executable SQL or text() construct, including ORM " "aware objects"
)
class ORMColumnsClauseRole(roles.ColumnsClauseRole):
_role_name = "ORM mapped entity, aliased entity, or Column expression"
class ORMEntityColumnsClauseRole(ORMColumnsClauseRole):
_role_name = "ORM mapped or aliased entity"
class ORMFromClauseRole(roles.StrictFromClauseRole):
_role_name = "ORM mapped entity, aliased entity, or FROM expression"
@inspection._self_inspects
class MapperProperty(
HasCacheKey, _MappedAttribute, InspectionAttr, util.MemoizedSlots
):
"""Represent a particular class attribute mapped by :class:`_orm.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`_schema.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`_orm.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
"_configure_started",
"_configure_finished",
"parent",
"key",
"info",
)
_cache_key_traversal = [
("parent", visitors.ExtendedInternalTraversal.dp_has_cache_key),
("key", visitors.ExtendedInternalTraversal.dp_string),
]
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
@property
def _links_to_entity(self):
"""True if this MapperProperty refers to a mapped entity.
Should only be True for RelationshipProperty, False for all others.
"""
raise NotImplementedError()
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, query_entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return "<%s at 0x%x; %s>" % (
self.__class__.__name__,
id(self),
getattr(self, "key", "no key"),
)
@inspection._self_inspects
class PropComparator(operators.ColumnOperators):
r"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
.. seealso::
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
__visit_name__ = "orm_prop_comparator"
def __init__(
self,
prop,
parentmapper,
adapt_to_entity=None,
):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _bulk_update_tuples(self, value):
"""Receive a SQL expression that represents a value in the SET
clause of an UPDATE statement.
Return a tuple that can be passed to a :class:`_expression.Update`
construct.
"""
return [(self.__clause_element__(), value)]
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def _propagate_attrs(self):
# this suits the case in coercions where we don't actually
# call ``__clause_element__()`` but still need to get
# resolved._propagate_attrs. See #6558.
return util.immutabledict(
{
"compile_state_plugin": "orm",
"plugin_subject": self._parentmapper,
}
)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
r"""Redefine this object in terms of a polymorphic subclass,
:func:`_orm.with_polymorphic` construct, or :func:`_orm.aliased`
construct.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
.. seealso::
:ref:`queryguide_join_onclause` - in the :ref:`queryguide_toplevel`
:ref:`inheritance_of_type`
"""
return self.operate(PropComparator.of_type_op, class_)
def and_(self, *criteria):
"""Add additional criteria to the ON clause that's represented by this
relationship attribute.
E.g.::
stmt = select(User).join(
User.addresses.and_(Address.email_address != 'foo')
)
stmt = select(User).options(
joinedload(User.addresses.and_(Address.email_address != 'foo'))
)
.. versionadded:: 1.4
.. seealso::
:ref:`orm_queryguide_join_on_augmented`
:ref:`loader_option_criteria`
:func:`.with_loader_criteria`
"""
return self.operate(operators.and_, *criteria)
def any(self, criterion=None, **kwargs):
r"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
r"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
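# --- Hedged usage sketch (standalone, not part of the original module) -------
# The comparator operators documented above (any(), and_()) surface directly
# on mapped attributes.  A minimal, self-contained illustration with an
# assumed two-table mapping; building the statements needs no database.
from sqlalchemy import Column, ForeignKey, Integer, String, select
from sqlalchemy.orm import declarative_base, relationship

_DemoBase = declarative_base()


class _DemoUser(_DemoBase):
    __tablename__ = "demo_users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    addresses = relationship("_DemoAddress", back_populates="user")


class _DemoAddress(_DemoBase):
    __tablename__ = "demo_addresses"
    id = Column(Integer, primary_key=True)
    email_address = Column(String)
    user_id = Column(Integer, ForeignKey("demo_users.id"))
    user = relationship("_DemoUser", back_populates="addresses")


# PropComparator.any(): renders an EXISTS subquery against the collection.
_stmt_any = select(_DemoUser).where(
    _DemoUser.addresses.any(_DemoAddress.email_address == "foo@example.com")
)

# PropComparator.and_() (1.4+): extra criteria folded into the join's ON clause.
_stmt_and = select(_DemoUser).join(
    _DemoUser.addresses.and_(_DemoAddress.email_address != "spam@example.com")
)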
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
"_strategies",
"strategy",
"_wildcard_token",
"_default_path_loader_key",
)
inherit_cache = True
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
return (
"%s:%s"
% (self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN),
)
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
(
"%s:%s"
% (self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN),
),
)
def _get_context_loader(self, context, path):
load = None
search_path = path[self]
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key,
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
pass
# run outside to prevent transfer of exception context
cls = self._strategy_lookup(self, *key)
# this previously was setting self._strategies[cls], that's
# a bad idea; should use strategy key at all times because every
# strategy has multiple keys at this point
self._strategies[key] = strategy = cls(self, key)
return strategy
def setup(self, context, query_entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(
context, query_entity, path, loader, adapter, **kwargs
)
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context,
query_entity,
path,
loader,
mapper,
result,
adapter,
populators,
)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if (
not self.parent.non_primary
and not mapper.class_manager._attr_has_impl(self.key)
):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if "_strategy_keys" not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, requesting_property, *key):
requesting_property.parent._with_polymorphic_mappers
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
for property_type, strats in cls._all_strategies.items():
if key in strats:
intended_property_type = property_type
actual_strategy = strats[key]
break
else:
intended_property_type = None
actual_strategy = None
raise orm_exc.LoaderStrategyException(
cls,
requesting_property,
intended_property_type,
actual_strategy,
key,
)
class ORMOption(ExecutableOption):
"""Base class for option objects that are passed to ORM queries.
These options may be consumed by :meth:`.Query.options`,
:meth:`.Select.options`, or in a more general sense by any
:meth:`.Executable.options` method. They are interpreted at
statement compile time or execution time in modern use. The
deprecated :class:`.MapperOption` is consumed at ORM query construction
time.
.. versionadded:: 1.4
"""
__slots__ = ()
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" SELECT statements that occur for relationship
lazy loaders as well as attribute load / refresh operations.
"""
_is_compile_state = False
_is_criteria_option = False
_is_strategy_option = False
class LoaderOption(ORMOption):
"""Describe a loader modification to an ORM statement at compilation time.
.. versionadded:: 1.4
"""
_is_compile_state = True
def process_compile_state_replaced_entities(
self, compile_state, mapper_entities
):
"""Apply a modification to a given :class:`.CompileState`,
given entities that were replaced by with_only_columns() or
with_entities().
.. versionadded:: 1.4.19
"""
self.process_compile_state(compile_state)
def process_compile_state(self, compile_state):
"""Apply a modification to a given :class:`.CompileState`."""
class CriteriaOption(ORMOption):
"""Describe a WHERE criteria modification to an ORM statement at
compilation time.
.. versionadded:: 1.4
"""
_is_compile_state = True
_is_criteria_option = True
def process_compile_state(self, compile_state):
"""Apply a modification to a given :class:`.CompileState`."""
def get_global_criteria(self, attributes):
"""update additional entity criteria options in the given
attributes dictionary.
"""
class UserDefinedOption(ORMOption):
"""Base class for a user-defined option that can be consumed from the
:meth:`.SessionEvents.do_orm_execute` event hook.
"""
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def __init__(self, payload=None):
self.payload = payload
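# --- Hedged usage sketch (standalone, not part of the original module) -------
# UserDefinedOption carries arbitrary per-execution state that a
# SessionEvents.do_orm_execute handler can consume.  The "shard hint" payload
# and handler body below are illustrative assumptions, not library behavior.
from sqlalchemy import event
from sqlalchemy.orm import Session


class _ShardHintOption(UserDefinedOption):
    """Carries a target-shard hint for the do_orm_execute handler."""


@event.listens_for(Session, "do_orm_execute")
def _consume_shard_hint(orm_execute_state):
    for opt in orm_execute_state.user_defined_options:
        if isinstance(opt, _ShardHintOption):
            # route or adjust the execution based on opt.payload here
            pass

# At query time (a session and a mapped User class are assumed to exist):
#     session.execute(select(User).options(_ShardHintOption("shard_a")))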
@util.deprecated_cls(
"1.4",
"The :class:`.MapperOption class is deprecated and will be removed "
"in a future release. For "
"modifications to queries on a per-execution basis, use the "
":class:`.UserDefinedOption` class to establish state within a "
":class:`.Query` or other Core statement, then use the "
":meth:`.SessionEvents.before_orm_execute` hook to consume them.",
constructor=None,
)
class MapperOption(ORMOption):
"""Describe a modification to a Query"""
_is_legacy_option = True
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`_query.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically applied during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(
self, compile_state, query_entity, path, loadopt, adapter, **kwargs
):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
|
|
import unittest
from lambda_tools import mapper
FAMOUS_FIVE = ['Dick', 'Julian', 'George', 'Anne', 'Timmy']
class StringFieldEntity:
hello = mapper.StringField()
class TestStringField(unittest.TestCase):
def test_simple_mapping(self):
result = mapper.parse(StringFieldEntity, { 'hello': 'world' })
self.assertEqual('world', result.hello)
self.assertIsInstance(result, StringFieldEntity)
def test_simple_mapping_with_default(self):
result = mapper.parse(StringFieldEntity, { })
self.assertEqual(None, result.hello)
def test_simple_mapping_with_unknown_value(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(StringFieldEntity, { 'goodbye': 'test'})
)
def test_simple_mapping_with_non_dict(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(StringFieldEntity, 'Hello world')
)
class RequiredStringFieldEntity:
hello = mapper.StringField(required=True)
class TestRequiredStringField(unittest.TestCase):
def test_missing_required_field(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(RequiredStringFieldEntity, { })
)
class IntFieldEntity:
count = mapper.IntField(default=100)
class TestIntField(unittest.TestCase):
def test_int_field(self):
result = mapper.parse(IntFieldEntity, { 'count': 10 })
self.assertEqual(result.count, 10)
def test_missing_int_field(self):
result = mapper.parse(IntFieldEntity, { })
self.assertEqual(result.count, 100)
def test_int_as_string(self):
result = mapper.parse(IntFieldEntity, { 'count': '10' })
self.assertEqual(result.count, 10)
class BoolFieldEntity:
active = mapper.BoolField()
class TestBoolField(unittest.TestCase):
def test_true(self):
result = mapper.parse(BoolFieldEntity, { 'active': True })
self.assertEqual(result.active, True)
def test_false(self):
result = mapper.parse(BoolFieldEntity, { 'active': False })
self.assertEqual(result.active, False)
class ChoiceFieldEntity:
name = mapper.ChoiceField(FAMOUS_FIVE)
class TestChoiceField(unittest.TestCase):
def test_valid(self):
result = mapper.parse(ChoiceFieldEntity, { 'name': 'Julian' })
def test_invalid(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ChoiceFieldEntity, { 'name': 'Jack' })
)
def test_missing(self):
result = mapper.parse(ChoiceFieldEntity, { })
self.assertEqual(None, result.name)
class ListFieldEntity:
names = mapper.ListField(mapper.StringField())
class TestListField(unittest.TestCase):
def test_valid(self):
result = mapper.parse(ListFieldEntity, { 'names': FAMOUS_FIVE })
self.assertListEqual(result.names, FAMOUS_FIVE)
def test_set(self):
names = set(FAMOUS_FIVE)
result = mapper.parse(ListFieldEntity, { 'names': names })
self.assertListEqual(result.names, list(names))
def test_empty_list(self):
result = mapper.parse(ListFieldEntity, { 'names': [] })
self.assertListEqual(result.names, [])
def test_invalid_list(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ListFieldEntity, { 'names': range(5) })
)
def test_string(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ListFieldEntity, { 'names': '5' })
)
def test_dict(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ListFieldEntity, { 'names': {} })
)
class DictFieldEntity:
environment = mapper.DictField(mapper.StringField())
class TestDictField(unittest.TestCase):
def test_valid(self):
result = mapper.parse(DictFieldEntity, { 'environment': { 'one': 'two' } })
self.assertDictEqual(result.environment, { 'one': 'two' })
def test_invalid_dict(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(DictFieldEntity, { 'environment': { 'one': [] } })
)
class ClassFieldEntity:
five = mapper.ClassField(ChoiceFieldEntity)
class TestChoiceField(unittest.TestCase):
def test_valid(self):
result = mapper.parse(ClassFieldEntity, {
'five': {
'name': 'Julian'
}
})
self.assertEqual(result.five.name, 'Julian')
def test_invalid(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ClassFieldEntity, {
'five': {
'name': 'Philip'
}
})
)
class ListClassFieldEntity:
five = mapper.ListField(mapper.ClassField(ChoiceFieldEntity))
class TestListClassField(unittest.TestCase):
def test_valid(self):
result = mapper.parse(ListClassFieldEntity, {
'five': [
{ 'name': 'Julian' },
{ 'name': 'Dick' },
{ 'name': 'George' },
{ 'name': 'Anne' },
{ 'name': 'Timmy' }
]
})
names = sorted([x.name for x in result.five])
self.assertListEqual(names, sorted(FAMOUS_FIVE))
def test_invalid(self):
self.assertRaises(
mapper.MappingError,
lambda: mapper.parse(ListClassFieldEntity, {
'five': [
{ 'name': 'Peter' },
{ 'name': 'Janet' },
{ 'name': 'Jack' },
{ 'name': 'Barbara' },
{ 'name': 'George' },
{ 'name': 'Pam' },
{ 'name': 'Colin' },
]
})
)
class ClassWithDefaultFieldEntity:
five = mapper.ClassField(ChoiceFieldEntity, default_field='name')
class TestClassWithDefaultField(unittest.TestCase):
def test_default_field(self):
result = mapper.parse(ClassWithDefaultFieldEntity, { 'five': 'George' })
self.assertEqual(result.five.name, 'George')
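# A hedged composite sketch tying the fields above together: a nested entity
# parsed from one plain dict, using only the lambda_tools.mapper API already
# exercised by the tests in this module (entity and field names are illustrative).
class HandlerEntity:
    name = mapper.StringField(required=True)
    memory = mapper.IntField(default=128)
    enabled = mapper.BoolField()
    tags = mapper.ListField(mapper.StringField())
class ConfigEntity:
    handlers = mapper.ListField(mapper.ClassField(HandlerEntity))
class TestCompositeEntity(unittest.TestCase):
    def test_composite(self):
        result = mapper.parse(ConfigEntity, {
            'handlers': [
                {'name': 'hello', 'memory': 256, 'enabled': True, 'tags': ['demo']}
            ]
        })
        self.assertEqual(result.handlers[0].name, 'hello')
        self.assertEqual(result.handlers[0].memory, 256)
        self.assertListEqual(result.handlers[0].tags, ['demo'])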
|
|
# -*- coding: ascii -*-
#
# Copyright 2007 - 2021
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
=================
Shell utilities
=================
Shell utilities.
"""
from __future__ import absolute_import
__author__ = "Andr\xe9 Malo"
import contextlib as _contextlib
import errno as _errno
import fnmatch as _fnmatch
import functools as _ft
import os as _os
import re as _re
import shutil as _shutil
import sys as _sys
import tempfile as _tempfile
# pylint: disable = invalid-name
root = _os.path.dirname(_os.path.dirname(_os.path.abspath(__file__)))
@_contextlib.contextmanager
def root_dir():
""" Context manager to change into the root directory """
assert root is not None
old = _os.getcwd()
try:
_os.chdir(root)
yield root
finally:
_os.chdir(old)
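# Hedged usage sketch: run a step from the checkout root, restoring the
# previous working directory afterwards (the body is illustrative).
def _root_dir_demo():
    """ List the checkout root from within it """
    with root_dir() as base:
        return sorted(_os.listdir(base))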
def _make_split_command():
"""
Make split_command function
The command splitter splits between tokens. Tokens are non-whitespace
sequences or double quoted strings. Inside those double quotes, double
quotes and backslashes can be escaped with a backslash.
Stolen from <http://opensource.perlig.de/svnmailer/>.
:Return: Parser for generic commandlines
:Rtype: callable
"""
argre = r'[^"\s]\S*|"[^\\"]*(?:\\[\\"][^\\"]*)*"'
check = _re.compile(
r'\s*(?:%(arg)s)(?:\s+(?:%(arg)s))*\s*$' % dict(arg=argre)
).match
split = _re.compile(argre).findall
strip = _ft.partial(_re.compile(r'\\([\\"])').sub, r'\1')
def split_command(command): # pylint: disable = redefined-outer-name
"""
Split generic commandline into single arguments
The command splitter splits between tokens. Tokens are non-whitespace
sequences or double quoted strings. Inside those double quotes, double
quotes and backslashes can be escaped with a backslash.
Stolen from <http://opensource.perlig.de/svnmailer/>.
:Return: List of split arguments
:Rtype: ``list``
"""
if not check(command):
raise ValueError("Invalid command string %r" % (command,))
return [
strip(arg[1:-1]) if arg.startswith('"') else arg
for arg in split(command)
]
return split_command
split_command = _make_split_command()
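# Hedged usage sketch (illustrative command string): quoted tokens keep their
# embedded whitespace; the surrounding double quotes are stripped and
# backslash escapes inside them are resolved.
_split_demo = split_command('gcc -o "out dir/app" main.c')
# _split_demo == ['gcc', '-o', 'out dir/app', 'main.c']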
def _make_formatter(*args, **kwargs):
"""
Make args / kwargs formatter
Either args or kwargs or neither of them can be set. There cannot be set
both of them.
:Return: Formatter, using either args or kwargs
:Rtype: callable
"""
# pylint: disable = no-else-return
assert not(args and kwargs)
if args:
# tuples are given for the whole command string but applied per token.
# We need to supply only the tuples which are needed for the current
# token.
args = list(args[::-1])
pcents = _re.compile(r'%[^%]').findall
def formatter(value):
""" Tuple formatter """
count = len(pcents(value))
torepl = []
while len(torepl) < count:
torepl.append(args.pop())
return value % tuple(torepl)
return formatter
elif kwargs:
return lambda x: x % kwargs
return lambda x: x
def _make_win32_command():
r"""
Make win32_command function
>>> x = win32_command(r'''
... command arg "arg 2" "" "arg %3"
... "malic'ious argument\\\"&whoami"
... ''')
>>> print(x[:42])
command arg ^"arg^ 2^" ^"^" ^"arg^ ^%3^" ^
>>> print(x[41:])
^"malic'ious^ argument\\\^"^&whoami^"
"""
wsp, meta = r'\r\n\t\x0b\x0c\x08 ', r'()%!^"<>&|'
slashsub = _ft.partial(_re.compile(r'(\\+)("|$)').sub, r'\1\1\2')
metasub = _ft.partial(_re.compile(r'([%s%s])' % (wsp, meta)).sub, r'^\1')
qsearch = _re.compile(r'[%s"]' % (wsp,)).search
needq = lambda x: not x or qsearch(x)
def win32_command(command, *args, **kwargs):
"""
Return a win32/cmd.exe suitable commandline
:See: https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/
2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
Either args or kwargs or neither of them can be set. There cannot be
set both of them.
:Parameters:
`command` : ``str``
Generic commandline, possibly containing substitutions, filled by
args or kwargs. See `split_command` for generic commandline
syntax.
`args` : ``tuple``
Substitution tuple
`kwargs` : ``dict``
Substitution dict
:Return: Strictly quoted shell commandline for ``cmd.exe``
:Rtype: ``str``
"""
# pylint: disable = redefined-outer-name
return ' '.join([metasub(
'"%s"' % (slashsub(token).replace('"', '\\"'),)
if needq(token) else token
) for token in map(_make_formatter(*args, **kwargs),
split_command(command))])
return win32_command
win32_command = _make_win32_command()
def _make_posix_command():
r"""
Make posix_command function
>>> x = posix_command(r'''
... command arg "arg 2" "" "arg $3"
... "malic'ious argument\\\"&whoami"
... ''')
>>> print(x)
command arg 'arg 2' '' 'arg $3' 'malic'\''ious argument\"&whoami'
"""
qsearch = _re.compile(r'[^a-zA-Z\d_./-]').search
needq = lambda x: not x or qsearch(x)
def posix_command(command, *args, **kwargs):
"""
Return a POSIX shell suitable commandline
Either args or kwargs or neither of them can be set. There cannot be
set both of them.
:Parameters:
`command` : ``str``
Generic commandline, possibly containing substitutions, filled by
args or kwargs. See `split_command` for generic commandline
syntax.
`args` : ``tuple``
Substitution tuple
`kwargs` : ``dict``
Substitution dict
:Return: Strictly quoted shell commandline for POSIX shells
:Rtype: ``str``
"""
# pylint: disable = redefined-outer-name
return ' '.join([
"'%s'" % (token.replace("'", "'\\''")) if needq(token) else token
for token in map(_make_formatter(*args, **kwargs),
split_command(command))
])
return posix_command
posix_command = _make_posix_command()
command = win32_command if _sys.platform.lower() == 'win32' else posix_command
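# Hedged usage sketch (illustrative values): positional args fill %s
# placeholders token by token, keyword args fill %(name)s placeholders;
# shell quoting is applied after substitution.
_posix_demo_args = posix_command('git commit -m %s', 'a message with spaces')
# -> "git commit -m 'a message with spaces'"
_posix_demo_kwargs = posix_command('tar -cf %(archive)s %(dir)s',
                                   archive='out.tar', dir='My Docs')
# -> "tar -cf out.tar 'My Docs'"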
def native(path):
"""
Convert slash path to native
:Parameters:
`path` : ``str``
Path relative to the checkout root
:Return: The native path
:Rtype: ``str``
"""
path = _os.path.sep.join(path.split('/'))
return _os.path.normpath(_os.path.join(root, path))
def cp(src, dest):
"""
Copy src to dest
:Parameters:
`src` : ``str``
Source path, relative to the checkout root
`dest` : ``str``
Dest path, relative to the checkout root
"""
_shutil.copy2(native(src), native(dest))
def cp_r(src, dest, ignore=None):
"""
Copy -r src to dest
:Parameters:
`src` : ``str``
Source path, relative to the checkout root
`dest` : ``str``
Dest path, relative to the checkout root
`ignore` : callable
Ignore callback
"""
_shutil.copytree(native(src), native(dest), ignore=ignore)
def rm(*dest):
"""
Remove a file, ENOENT is not considered an error
:Parameters:
`dest` : ``str``
File to remove
"""
for name in dest:
try:
_os.unlink(native(name))
except OSError as e:
if _errno.ENOENT != e.errno:
raise
def rm_rf(*dest):
"""
Remove a tree
:Parameters:
`dest` : ``str``
Path to remove
"""
for name in dest:
name = native(name)
if _os.path.exists(name):
if _os.path.islink(name):
_os.unlink(name)
continue
for path in files(name, '*'):
if not _os.path.islink(native(path)):
_os.chmod(native(path), 0o644)
_shutil.rmtree(name)
def mkdir_p(dirname):
"""
Create directories
:Parameters:
`dirname` : ``str``
Directory name (the leaf directory)
"""
try:
_os.makedirs(dirname)
except OSError as e:
# makedirs throws OSError if the last dir segment exists
if e.errno != _errno.EEXIST:
raise
mkstemp = _tempfile.mkstemp
walk = _os.walk
def files(base, wildcard='[!.]*', recursive=1, prune=('.git', '.svn', 'CVS')):
"""
Determine a filelist
:Parameters:
`base` : ``str``
Base path to start from
`wildcard` : ``str``
Glob to match against
`recursive` : ``bool``
Deep walk into the tree? Default: true
`prune` : iterable
List of directory basenames to ignore.
Default: ('.git', '.svn', 'CVS'). Can be empty or ``None`` (meaning
the same)
:Return: Iterator over matching pathnames
:Rtype: iterable
"""
prune = tuple(prune or ())
for dirpath, dirnames, filenames in walk(native(base)):
for item in prune:
if item in dirnames:
dirnames.remove(item)
filenames.sort()
for name in _fnmatch.filter(filenames, wildcard):
dest = _os.path.join(dirpath, name)
if dest.startswith(root):
dest = dest.replace(root, '', 1)
aslist = []
head, tail = _os.path.split(dest)
while tail:
aslist.append(tail)
head, tail = _os.path.split(head)
aslist.reverse()
dest = '/'.join(aslist)
yield dest
if not recursive:
break
dirnames.sort()
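# Hedged usage sketch (the directory name is illustrative): collect python
# files below a subtree as slash-separated paths relative to the checkout
# root, with VCS directories pruned.
def _files_demo():
    """ Return all *.py paths below docs/ (empty if the tree is absent) """
    return list(files('docs', wildcard='*.py'))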
def dirs(base, wildcard='[!.]*', recursive=1, prune=('.git', '.svn', 'CVS')):
"""
Determine a directory list
:Parameters:
`base` : ``str``
Base path to start from
`wildcard` : ``str``
Glob to match against
`recursive` : ``bool``
Deep walk into the tree? Default: true
`prune` : iterable
List of directory basenames to ignore.
Default: ('.git', '.svn', 'CVS'). Can be empty or ``None`` (meaning
the same)
:Return: Iterator over matching pathnames
:Rtype: iterable
"""
prune = tuple(prune or ())
for dirpath, dirnames, _ in walk(native(base)):
for item in prune:
if item in dirnames:
dirnames.remove(item)
dirnames.sort()
for name in _fnmatch.filter(dirnames, wildcard):
dest = _os.path.join(dirpath, name)
if dest.startswith(root):
dest = dest.replace(root, '', 1)
aslist = []
head, tail = _os.path.split(dest)
while tail:
aslist.append(tail)
head, tail = _os.path.split(head)
aslist.reverse()
dest = '/'.join(aslist)
yield dest
if not recursive:
break
def frompath(executable):
"""
Find executable in PATH
:Parameters:
`executable` : ``str``
Command to search for
:Return: Full path or ``None``
:Rtype: ``str``
"""
# Based on distutils.spawn.find_executable.
path = _os.environ.get('PATH', '')
paths = [
_os.path.expanduser(item)
for item in path.split(_os.pathsep)
]
ext = _os.path.splitext(executable)[1]
exts = ['']
if _sys.platform == 'win32' or _os.name == 'os2':
eext = ['.exe', '.bat', '.py']
if ext not in eext:
exts.extend(eext)
for ext in exts:
if not _os.path.isfile(executable + ext):
for path in paths:
fname = _os.path.join(path, executable + ext)
if _os.path.isfile(fname):
# the file exists, we have a shot at spawn working
return fname
else:
return executable + ext
return None
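# Hedged usage sketch (tool name is illustrative): locate an executable on
# PATH, e.g. '/usr/bin/python3' on many POSIX systems, or None if not found.
_python3_path = frompath('python3')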
|
|
"""Describe TestBlock class."""
# pylint: disable=attribute-defined-outside-init,unused-argument
# pylint: disable=dangerous-default-value,access-member-before-definition
# pylint: disable=bare-except,protected-access,too-many-instance-attributes
# pylint: disable=too-many-arguments,too-many-locals,broad-except,no-self-use
# pylint: disable=too-many-public-methods,deprecated-method
from __future__ import absolute_import
import os
import sys
import unittest
import platform
from bdb import BdbQuit
from functools import wraps
from itertools import count
from ipdbugger import debug
from attrdict import AttrDict
from cached_property import cached_property
from future.builtins import next, str, object
from future.utils import iteritems, itervalues
from rotest.core.result.result import Result
from rotest.common.utils import get_class_fields
from rotest.core.models.case_data import TestOutcome
from rotest.management.base_resource import ResourceRequest
from rotest.common.log import get_test_logger, get_tree_path
from rotest.management.client.manager import ClientResourceManager
request = ResourceRequest
class AbstractTest(unittest.TestCase):
"""Base class for all runnable Rotest tests.
Attributes:
resources (tuple): list of the required resources. Each item is a
tuple of (resource_name, resource type, parameters dictionary);
you can use :func:`rotest.core.request` to create the tuple.
identifier (number): unique id of the test.
data (rotest.core.models._data.Data): contains information
about a test run.
logger (logging.Logger): test logger.
save_state (bool): a flag to determine if storing the states of
resources is required.
config (AttrDict): dictionary of configurations.
enable_debug (bool): whether to enable entering ipdb debugging mode
upon any exception in a test statement.
skip_init (bool): True to skip resources initialize and validation.
resource_manager (ClientResourceManager): client resource manager.
TAGS (list): list of tags by which the test may be filtered.
IS_COMPLEX (bool): if this test is complex (may contain sub-tests).
TIMEOUT (number): timeout for flow run, None means no timeout.
"""
SETUP_METHOD_NAME = 'setUp'
TEARDOWN_METHOD_NAME = 'tearDown'
TIMEOUT = 60 * 60 # 60 minutes
resources = ()
TAGS = []
IS_COMPLEX = False
STATE_DIR_NAME = "state"
def __init__(self, methodName='test_method', indexer=count(), parent=None,
save_state=True, config=None, enable_debug=False,
resource_manager=None, skip_init=False):
if enable_debug:
for method_name in (methodName, self.SETUP_METHOD_NAME,
self.TEARDOWN_METHOD_NAME):
debug(getattr(self, method_name),
ignore_exceptions=[KeyboardInterrupt,
unittest.SkipTest,
BdbQuit])
super(AbstractTest, self).__init__(methodName)
self.result = None
self.is_main = True
self.config = config
self.parent = parent
self.skip_init = skip_init
self.save_state = save_state
self.identifier = next(indexer)
self.enable_debug = enable_debug
self.parents_count = self._get_parents_count()
self.all_resources = AttrDict()
self.locked_resources = AttrDict()
self._is_client_local = False
self.resource_manager = resource_manager
if parent is not None:
parent.addTest(self)
def override_resource_loggers(self):
"""Replace the resources' logger with the test's logger."""
for resource in itervalues(self.all_resources):
resource.override_logger(self.logger)
def release_resource_loggers(self):
"""Revert logger replacement."""
for resource in itervalues(self.all_resources):
resource.release_logger(self.logger)
@classmethod
def get_resource_requests(cls):
"""Return a list of all the resource requests this test makes.
Resource requests can be done both by overriding the class's
'resources' field and by declaring class fields that point to a
BaseResource instance.
Returns:
list. resource requests of the test class.
"""
all_requests = list(cls.resources)
for (field_name, new_request) in get_class_fields(cls,
ResourceRequest):
new_request.name = field_name
if new_request not in all_requests:
all_requests.append(new_request)
return all_requests
def create_resource_manager(self):
"""Create a new resource manager client instance.
Returns:
ClientResourceManager. new resource manager client.
"""
return ClientResourceManager()
def expect(self, expression, msg=None):
"""Check an expression and fail the test at the end if it's False.
This does not raise an AssertionError like assertTrue, but instead
updates the result of the test and appends the message to the saved
traceback without stopping its flow.
Args:
expression (bool): value to validate.
msg (str): failure message if the expression is False.
Returns:
bool. True if the validation passed, False otherwise.
"""
if not expression:
failure = AssertionError(msg)
self.result.addFailure(self, (failure.__class__, failure, None))
return False
return True
def add_resources(self, resources):
"""Register the resources to the case and set them as its attributes.
Args:
resources (dict): dictionary of attributes name to resources
instance.
"""
self.all_resources.update(resources)
for name, resource in iteritems(resources):
setattr(self, name, resource)
def request_resources(self, resources_to_request, use_previous=False,
force_initialize=False):
"""Lock the requested resources and prepare them for the test.
Lock the required resources using the resource manager, then assign
each resource to its requested name, and update the result of the
chosen resources. This method can also be used to add resources to all
the sibling blocks under the test-flow.
Args:
resources_to_request (list): list of resource requests to lock.
use_previous (bool): whether to use previously locked resources and
release the unused ones.
force_initialize (bool): whether the resources will be initialized
even if the validation succeeds and skip_init is True.
"""
new_requests = []
request_name_to_unpack = {}
for resource_request in resources_to_request:
if resource_request.name in self.all_resources:
self.logger.debug("Already has a resource named %r, "
"skipping request", resource_request)
else:
new_requests.append(resource_request)
request_name_to_unpack[resource_request.name] = \
resource_request.do_unpack
if len(new_requests) == 0:
# No resources to request
return
requested_resources = self.resource_manager.request_resources(
config=self.config,
skip_init=self.skip_init,
use_previous=use_previous,
base_work_dir=self.work_dir,
requests=new_requests,
enable_debug=self.enable_debug,
force_initialize=force_initialize)
self.add_resources(requested_resources)
self.locked_resources.update(requested_resources)
for name, resource in iteritems(requested_resources):
resource.override_logger(self.logger)
unpack_value = request_name_to_unpack[name]
self.unpack_resource(resource, unpack_value)
if isinstance(self.result, Result):
self.result.updateResources(self)
def unpack_resource(self, resource, unpack_order):
"""Unpack a resource - Add its sub-resources as requested resources.
Args:
resource (BaseResource): resource to unpack.
unpack_order (number): level of unpacking (none, once, recursive).
"""
if unpack_order != ResourceRequest.DONT_UNPACK:
self.logger.debug("unpacking %r", resource.name)
sub_resources = resource.get_sub_resource_dict()
self.add_resources(sub_resources)
if unpack_order == ResourceRequest.RECURSIVE_UNPACK:
for sub_resource in sub_resources.values():
self.unpack_resource(sub_resource, unpack_order)
def release_resources(self, resources=None, dirty=False,
force_release=True):
"""Release given resources using the client.
Args:
resources (list): resource names to release, leave None to release
all locked resources.
dirty (bool): True if the resource's integrity has been
compromised, and it should be re-validated.
force_release (bool): whether to always release the resources
or allow saving them for the next tests.
"""
if resources is None:
resources = list(self.locked_resources.keys())
if len(resources) == 0:
# No locked resources to release
return
resources_dict = {
name: resource
for name, resource in iteritems(self.locked_resources)
if name in resources
}
not_releasing = [name for name in resources
if name not in resources_dict]
if not_releasing:
self.logger.warn("Not releasing (since they weren't locked by "
"the component): %r", not_releasing)
self.resource_manager.release_resources(list(resources_dict.values()),
dirty=dirty,
force_release=force_release)
# Remove the resources from the test's resource to avoid double release
for resource in resources_dict:
self.locked_resources.pop(resource, None)
def _get_parents_count(self):
"""Get the number of ancestors.
Returns:
number. number of ancestors.
"""
if self.parent is None:
return 0
return self.parent.parents_count + 1
@cached_property
def logger(self):
"""Create logger instance for the test."""
return get_test_logger(get_tree_path(self), self.work_dir)
def start(self):
"""Update the data that the test started."""
self.data.start()
def end(self, test_outcome, details=None):
"""Update the data that the test ended.
Args:
test_outcome (number): test outcome code (as defined in
rotest.core.models.case_data.TestOutcome).
details (str): details of the result (traceback/skip reason).
"""
self.data.update_result(test_outcome, details)
def _decorate_teardown(self, teardown_method):
"""Decorate the tearDown method to handle resource release.
Args:
teardown_method (function): the original tearDown method.
Returns:
function. the wrapped tearDown method.
"""
@wraps(teardown_method)
def teardown_method_wrapper(*args, **kwargs):
"""tearDown method wrapper.
* Executes the original tearDown method.
* Releases the test resources.
* Closes the client if needed
"""
if isinstance(self.result, Result):
self.result.startTeardown(self)
try:
teardown_method(*args, **kwargs)
except Exception:
self.result.addError(self, sys.exc_info())
finally:
self.store_state()
self.release_resources(
dirty=self.data.exception_type == TestOutcome.ERROR,
force_release=False)
if (self._is_client_local and
self.resource_manager.is_connected()):
self.resource_manager.disconnect()
return teardown_method_wrapper
def store_state(self):
"""Store the state of the resources in the work dir."""
# In Python 3, tearDown() is called before result.addError(), whereas
# in Python 2, addError() is called before tearDown().
# In Python 3, self.data.exception_type would therefore still be None
# here, so the error state is checked via the self._outcome object;
# in Python 2 the exception_type identifier can be checked directly.
if not self.save_state:
self.logger.debug("Skipping saving state")
return
if platform.python_version().startswith("3"):
exceptions_that_occurred = len([test
for test, exc_info
in self._outcome.errors
if exc_info is not None])
if exceptions_that_occurred == 0:
self.logger.debug("Test didn't fail, skipping saving state")
return
elif platform.python_version().startswith("2"):
status = self.data.exception_type
if status is None or status in TestOutcome.POSITIVE_RESULTS:
self.logger.debug("Test didn't fail, skipping saving state")
return
store_dir = os.path.join(self.work_dir, self.STATE_DIR_NAME)
# In case a state dir already exists, create a new one.
state_dir_index = 1
while os.path.exists(store_dir):
state_dir_index += 1
store_dir = os.path.join(self.work_dir,
self.STATE_DIR_NAME + str(
state_dir_index))
self.logger.debug("Creating state dir %r", store_dir)
os.makedirs(store_dir)
for resource in itervalues(self.locked_resources):
resource.store_state(store_dir)
def _wrap_assert(self, assert_method, *args, **kwargs):
try:
assert_method(*args, **kwargs)
except AssertionError as err:
self.expect(False, str(err))
class _ExpectRaisesContext(object):
def __init__(self, assert_context, wrap_assert):
self.assert_context = assert_context
self.wrap_assert = wrap_assert
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.wrap_assert(self.assert_context.__exit__,
exc_type, exc_value, tb)
def expectRaises(self, expected_exception, callable_obj=None,
*args, **kwargs):
if callable_obj is None:
return AbstractTest._ExpectRaisesContext(self.assertRaises(
expected_exception,
callable_obj,
*args, **kwargs),
self._wrap_assert)
self._wrap_assert(self.assertRaises, expected_exception, callable_obj,
*args, **kwargs)
def expectRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
if callable_obj is None:
return AbstractTest._ExpectRaisesContext(self.assertRaisesRegexp(
expected_exception,
expected_regexp,
callable_obj,
*args, **kwargs),
self._wrap_assert)
self._wrap_assert(self.assertRaisesRegexp, expected_exception,
expected_regexp, callable_obj, *args, **kwargs)
def addSuccess(self, msg):
"""Register a success message to the test result.
Args:
msg (str): success message to add to the result.
"""
self.result.addInfo(self, msg)
# Shortcuts
success = addSuccess
skip = unittest.TestCase.skipTest
def create_expect_method(method_name):
original_assert = getattr(unittest.TestCase, method_name)
@wraps(original_assert)
def extended_assert(self, *args, **kwargs):
success_msg = kwargs.pop("success_msg", None)
retval = original_assert(self, *args, **kwargs)
if success_msg is not None:
self.success(success_msg)
return retval
setattr(AbstractTest, method_name, extended_assert)
def expect_func(self, *args, **kwargs):
return self._wrap_assert(getattr(self, method_name), *args, **kwargs)
expect_func.__doc__ = """Like {} but doesn't break workflow.""".format(
method_name)
setattr(AbstractTest, method_name.replace("assert", "expect"),
expect_func)
# Create an 'expect' method for every 'assert' method in unittest.TestCase
for attr_name in unittest.TestCase.__dict__:
if attr_name.startswith("assert") and \
"Raises" not in attr_name and "_" not in attr_name:
create_expect_method(attr_name)
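# --- Hedged standalone sketch of the "expect" pattern above ------------------
# The same idea without rotest's Result pipeline: assertion failures are
# collected instead of aborting the test method, then reported together.
# Class and method names here are illustrative, not part of rotest.
class _SoftAssertSketch(unittest.TestCase):
    def setUp(self):
        self._soft_failures = []

    def expect_equal(self, first, second, msg=None):
        """Like assertEqual, but record the failure and keep running."""
        try:
            self.assertEqual(first, second, msg)
        except AssertionError as error:
            self._soft_failures.append(str(error))

    def tearDown(self):
        if self._soft_failures:
            self.fail("soft failures:\n" + "\n".join(self._soft_failures))

    def test_demo(self):
        self.expect_equal(1, 1)      # passes; execution continues either way
        self.expect_equal("a", "a")  # passes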
|
|
"""
Matplotlib implementation of the plotting engine.
"""
from __future__ import absolute_import, print_function, division
from .engine import PlottingEngine, PlottingFigure, PlottingLayout
import os
import matplotlib.pyplot as plt
from matplotlib import gridspec
from tempfile import mkstemp
# detect IPython environments (affects legend placement in render())
IPYTHON = False
if any('IPYTHONDIR' in name for name in os.environ):
IPYTHON = True
class MatplotlibEngine(PlottingEngine):
""" Matplotlib engine."""
def __init__(self):
super(MatplotlibEngine, self).__init__()
def __str__(self):
return "<MatplotlibEngine>"
@classmethod
def newFigure(cls, title=None, logX=False, logY=False, layout=None, xtitle=None, ytitle=None):
""" Returns a figure object."""
if layout is None:
layout = PlottingLayout()
fig = MatplotlibFigure(title=title, layout=layout, xtitle=xtitle, ytitle=ytitle, logx=logX, logy=logY)
return fig
class MatplotlibFigure(PlottingFigure):
""" MatplotlibFigure. """
def __init__(self, layout=PlottingLayout(), use_legend=True, xtitle=None, ytitle=None, title=None,
linewidth=None, xlim=None, ylim=None, logx=None, logy=None, xscale=None, yscale=None,
grid=None, ordinates=None, tag=None, labels=None, figsize=(9,6), savefig=None, dpi=None):
super(MatplotlibFigure, self).__init__(title=title, layout=layout,
xtitle=xtitle, ytitle=ytitle, logx=logx, logy=logy)
self.use_legend = use_legend
self.linewidth = linewidth
self.xscale = xscale
self.yscale = yscale
self.grid = grid
self.ordinates = ordinates
self.tag = tag
self.labels = labels
self.figsize = figsize
self.savefig = savefig
self.dpi = dpi
def render(self):
""" Plot the figure. Call this last."""
fig, ax = plt.subplots(num=None, figsize=self.figsize, facecolor='w', edgecolor='k')
have_labels = False
show_legend = False # override self.use_legend if user called plot with showlegend=True
bartype = "vertical"
for dataset in self.getDatasets():
mode = "line"
kwargs = {}
if "mode" in dataset:
mode = dataset["mode"]
#Set different defaults based on the mode
passkeys = ["alpha", "color", "linewidth", "marker", "mfc", "mec", "ms", "mew"]
if mode=="line":
kwargs['marker'] = ''
kwargs['linewidth'] = self.linewidth
elif mode=="markers":
kwargs['marker'] = 'o'
kwargs['linewidth'] = 0
passkeys = ["alpha", "color", "marker", "mfc", "mec", "ms", "mew"]
elif mode=="bar":
passkeys = ["alpha", "color", "linewidth", "edgecolor", "bottom"]
elif mode=="fillBetween":
passkeys = ["alpha", "color", "y2"]
for dkey in dataset:
element = dataset[dkey]
if element is None:
continue
# These keys map directly to the matplotlib keyword of the same name
if dkey in passkeys:
kwargs[dkey] = element
#These keys must be translated to matplotlib
elif dkey=="name":
kwargs['label'] = element
have_labels = True
elif dkey=="bartype":
bartype = element
elif dkey == 'dash' and mode != "bar":
if isinstance(dataset['dash'], list):
kwargs['dashes'] = element
else:
kwargs['dashes'] = [4,2]
if 'text' in dataset and dataset['text'] is not None:
for x,y,t in zip(dataset['x'], dataset['y'], dataset['text']):
plt.text(x, y, t, bbox=dict(facecolor='white', alpha=1))
elif mode == "fill":
plt.fill_between(dataset['x'], dataset['y'], **kwargs)
elif mode == "fillBetween":
plt.fill_between(dataset['x'], dataset['y'], **kwargs)
elif mode == "bar":
if bartype == "horizontal":
if "bottom" in kwargs:
kwargs["left"] = kwargs["bottom"]
del kwargs["bottom"]
plt.barh(dataset['x'], dataset['y'], **kwargs)
else:
plt.bar(dataset['x'], dataset['y'], **kwargs)
else:
plt.plot(dataset['x'], dataset['y'], **kwargs)
# TODO: data as points
# title & axes labels
if self.title:
ax.set_title(self.title, fontweight='bold')
if self.xtitle:
ax.set_xlabel(self.xtitle, fontweight='bold')
if self.ytitle:
ax.set_ylabel(self.ytitle, fontweight="bold")
# axes limits
if self.xlim:
ax.set_xlim(self.xlim)
if self.ylim:
ax.set_ylim(self.ylim)
# axes type
if self.logx or self.xscale == 'log':
ax.set_xscale('log')
elif self.xscale != None:
ax.set_xscale(self.xscale)
if self.logy or self.yscale == 'log':
ax.set_yscale('log')
elif self.yscale != None:
ax.set_yscale(self.yscale)
# grid
if self.grid:
ax.grid(linestyle='dotted', alpha=0.8)
# TODO: implement ordinates, tags & labels
# legend
if (self.use_legend and have_labels) or show_legend:
if not IPYTHON:
legend = plt.legend()
else:
# legend = plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=1.)
# legend = plt.legend(bbox_to_anchor=(0.0, 1.02, 1., .102), ncol=2, loc='best', borderaxespad=0.)
legend = plt.legend(ncol=1, loc='best', borderaxespad=0.)
# legend.draw_frame(False)
legend.draw_frame(True)
# save figure
if self.savefig:
plt.savefig(self.savefig, dpi=self.dpi, bbox_inches='tight')
print('saved plot to {}'.format(self.savefig))
plt.show()
return fig
def save(self, filename, format):
fig = self.render()
fig.savefig(filename, format=format)
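# --- Hedged standalone sketch (not part of the engine) -----------------------
# What render() effectively does for a dataset in "markers" mode, expressed
# directly with matplotlib: marker 'o', zero linewidth, and a label that
# feeds the legend.  Data values are illustrative.
def _markers_mode_demo():
    import numpy as np
    x = np.linspace(0, 10, 25)
    fig, ax = plt.subplots(figsize=(9, 6))
    ax.plot(x, np.sin(x), marker='o', linewidth=0, label='sin(x)')
    ax.legend()
    return fig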
# FIXME: integrate old code
# Old code:
# if loc is False:
# loc = None
#
# if 'linewidth' not in kwargs:
# kwargs['linewidth'] = 2.0
#
# # get the names
# names = result.dtype.names
# if names is None:
# names = self.selections
#
# # check if set_prop_cycle is supported
# if hasattr(plt.gca(), 'set_prop_cycle'):
# # reset color cycle (repeated simulations have the same colors)
# plt.gca().set_prop_cycle(None)
#
# # make plot
# Ncol = result.shape[1]
# if len(names) != Ncol:
# raise Exception('Legend names must match result array')
# for k in range(1, Ncol):
# if loc is None:
# # no labels if no legend
# plt.plot(result[:, 0], result[:, k], **kwargs)
# else:
# plt.plot(result[:, 0], result[:, k], label=names[k], **kwargs)
#
# cmap = plt.get_cmap('Blues')
#
# # labels
# if xlabel is None:
# xlabel = names[0]
# plt.xlabel(xlabel)
# if ylabel is not None:
# plt.ylabel(ylabel)
# if title is not None:
# plt.title(title)
# if xlim is not None:
# plt.xlim(xlim)
# if ylim is not None:
# plt.ylim(ylim)
# # axis and grids
# plt.xscale(xscale)
# plt.yscale(yscale)
# plt.grid(grid)
#
# # show legend
# if loc is not None:
# plt.legend(loc=loc)
# # show plot
# if show:
# plt.show()
# return plt
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from collections import namedtuple
from test_framework.address import (
key_to_p2pkh,
script_to_p2sh,
)
from test_framework.script import (
CScript,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_NOP,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script'])
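# Hedged reference sketch: the general shape of a single importmulti request
# as exercised by test_importmulti() below (all values are illustrative).
_EXAMPLE_IMPORT_REQUEST = {
    "scriptPubKey": {"address": "<p2pkh or p2sh address>"},  # or a raw hex script
    "timestamp": "now",   # or a UNIX timestamp to bound the rescan
    "pubkeys": [],        # optional public keys
    "keys": [],           # optional private keys (WIF)
    "internal": False,    # must be True for raw nonstandard scriptPubKeys
    "watchonly": False,
    "label": "",          # not allowed together with internal=True
}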
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [['-usehd=1']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def get_key(self):
"""Generate a fresh key on node0
Returns a named tuple of privkey, pubkey and all address and scripts."""
addr = self.nodes[0].getnewaddress()
pubkey = self.nodes[0].getaddressinfo(addr)['pubkey']
pkh = hash160(bytes.fromhex(pubkey))
return Key(self.nodes[0].dumpprivkey(addr),
pubkey,
CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(), # p2pkh
key_to_p2pkh(pubkey)) # p2pkh addr
def get_multisig(self):
"""Generate a fresh multisig on node0
Returns a named tuple of privkeys, pubkeys and all address and scripts."""
addrs = []
pubkeys = []
for _ in range(3):
addr = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [bytes.fromhex(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
return Multisig([self.nodes[0].dumpprivkey(addr) for addr in addrs],
pubkeys,
CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(), # p2sh
script_to_p2sh(script_code), # p2sh addr
script_code.hex()) # redeem script
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=[]):
"""Run importmulti and assert success"""
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def test_address(self, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = self.nodes[1].getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now"},
True)
self.test_address(address,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = address
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = self.get_key()
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
True)
self.test_address(key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info("Should not allow a label to be specified when internal is true")
key = self.get_key()
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Example label"},
False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
self.test_address(address,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
self.test_address(address,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"keys": [key.privkey]},
True)
self.test_address(address,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info("Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"keys": [key.privkey]},
False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info("Should import an address with private key and with watchonly")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
self.test_address(address,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
True)
self.test_address(address,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
key = self.get_key()
address = key.p2pkh_addr
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
self.test_address(address,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = self.get_multisig()
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
True)
self.test_address(multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = self.get_multisig()
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = self.get_multisig()
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = self.get_multisig()
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
True)
self.test_address(multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with the wrong public key as non-solvable")
key = self.get_key()
address = key.p2pkh_addr
wrong_key = self.get_key().pubkey
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"pubkeys": [wrong_key]},
True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = self.get_key()
address = key.p2pkh_addr
wrong_key = self.get_key().pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should import an address with a wrong private key as non-solvable")
key = self.get_key()
address = key.p2pkh_addr
wrong_privkey = self.get_key().privkey
self.test_importmulti({"scriptPubKey": {"address": address},
"timestamp": "now",
"keys": [wrong_privkey]},
True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = self.get_key()
address = key.p2pkh_addr
wrong_privkey = self.get_key().privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(address,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
True)
self.test_address(watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
self.test_address(watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
desc = "sh(pkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor without keys
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
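# Note: due to operator precedence, (2 << 31 + 1) parses as 2 << 32 == 2**33, so the
# range end lies far above the allowed maximum and the import is rejected.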
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing of a P2PKH address via descriptor
key = self.get_key()
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.test_address(key.p2pkh_addr,
solvable=True,
ismine=False,
label="Descriptor import test")
# Test import fails if both desc and scriptPubKey are provided
key = self.get_key()
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = self.get_key()
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = self.get_key()
key2 = self.get_key()
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
self.test_address(key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
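# Build a descriptor carrying key origin info: hdkeypath starts with "m", so [1:]
# drops it and the origin becomes "<fingerprint>/<derivation path>".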
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("pkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("pkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey wallet
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('sh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc("")
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'yUxX4qnzWntXhEGrYB92v7ez4EZBnUjB1y', # m/0'/0'/0
'yRhTPsPd2qYgYbFFCqY2nuPHJQBjTnMQxg', # m/0'/0'/1
'yUyn3UV9rBdWfw6yJJ6eAoKuzDJ8RVLP1o', # m/0'/0'/2
'yi8GEkfLBgK85wGmBFsMFdSbEvPPNCSnVx', # m/0'/0'/3
'yYB4whdY8APWoCez6ryNdMBrrDjwzFbqMi', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range' : [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
|
|
"""Support to manage a shopping list."""
import asyncio
import logging
import uuid
import voluptuous as vol
from homeassistant.const import HTTP_NOT_FOUND, HTTP_BAD_REQUEST
from homeassistant.core import callback
from homeassistant.components import http
from homeassistant.components.http.data_validator import (
RequestDataValidator)
from homeassistant.helpers import intent
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from homeassistant.components import websocket_api
ATTR_NAME = 'name'
DOMAIN = 'shopping_list'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA)
EVENT = 'shopping_list_updated'
INTENT_ADD_ITEM = 'HassShoppingListAddItem'
INTENT_LAST_ITEMS = 'HassShoppingListLastItems'
ITEM_UPDATE_SCHEMA = vol.Schema({
'complete': bool,
ATTR_NAME: str,
})
PERSISTENCE = '.shopping_list.json'
SERVICE_ADD_ITEM = 'add_item'
SERVICE_COMPLETE_ITEM = 'complete_item'
SERVICE_ITEM_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): vol.Any(None, cv.string)
})
WS_TYPE_SHOPPING_LIST_ITEMS = 'shopping_list/items'
WS_TYPE_SHOPPING_LIST_ADD_ITEM = 'shopping_list/items/add'
WS_TYPE_SHOPPING_LIST_UPDATE_ITEM = 'shopping_list/items/update'
WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS = 'shopping_list/items/clear'
SCHEMA_WEBSOCKET_ITEMS = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SHOPPING_LIST_ITEMS
})
SCHEMA_WEBSOCKET_ADD_ITEM = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SHOPPING_LIST_ADD_ITEM,
vol.Required('name'): str
})
SCHEMA_WEBSOCKET_UPDATE_ITEM = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
vol.Required('item_id'): str,
vol.Optional('name'): str,
vol.Optional('complete'): bool
})
SCHEMA_WEBSOCKET_CLEAR_ITEMS = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS
})
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the shopping list."""
@asyncio.coroutine
def add_item_service(call):
"""Add an item with `name`."""
data = hass.data[DOMAIN]
name = call.data.get(ATTR_NAME)
if name is not None:
data.async_add(name)
@asyncio.coroutine
def complete_item_service(call):
"""Mark the item provided via `name` as completed."""
data = hass.data[DOMAIN]
name = call.data.get(ATTR_NAME)
if name is None:
return
try:
item = [item for item in data.items if item['name'] == name][0]
except IndexError:
_LOGGER.error("Removing of item failed: %s cannot be found", name)
else:
data.async_update(item['id'], {'name': name, 'complete': True})
data = hass.data[DOMAIN] = ShoppingData(hass)
yield from data.async_load()
intent.async_register(hass, AddItemIntent())
intent.async_register(hass, ListTopItemsIntent())
hass.services.async_register(
DOMAIN, SERVICE_ADD_ITEM, add_item_service, schema=SERVICE_ITEM_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_COMPLETE_ITEM, complete_item_service,
schema=SERVICE_ITEM_SCHEMA
)
hass.http.register_view(ShoppingListView)
hass.http.register_view(CreateShoppingListItemView)
hass.http.register_view(UpdateShoppingListItemView)
hass.http.register_view(ClearCompletedItemsView)
hass.components.conversation.async_register(INTENT_ADD_ITEM, [
'Add [the] [a] [an] {item} to my shopping list',
])
hass.components.conversation.async_register(INTENT_LAST_ITEMS, [
'What is on my shopping list'
])
yield from hass.components.frontend.async_register_built_in_panel(
'shopping-list', 'shopping_list', 'mdi:cart')
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_ITEMS,
websocket_handle_items,
SCHEMA_WEBSOCKET_ITEMS)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_ADD_ITEM,
websocket_handle_add,
SCHEMA_WEBSOCKET_ADD_ITEM)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
websocket_handle_update,
SCHEMA_WEBSOCKET_UPDATE_ITEM)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS,
websocket_handle_clear,
SCHEMA_WEBSOCKET_CLEAR_ITEMS)
return True
class ShoppingData:
"""Class to hold shopping list data."""
def __init__(self, hass):
"""Initialize the shopping list."""
self.hass = hass
self.items = []
@callback
def async_add(self, name):
"""Add a shopping list item."""
item = {
'name': name,
'id': uuid.uuid4().hex,
'complete': False
}
self.items.append(item)
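# Persist in the background: save_json does blocking file I/O, so it is handed to
# hass.async_add_job rather than being called directly on the event loop.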
self.hass.async_add_job(self.save)
return item
@callback
def async_update(self, item_id, info):
"""Update a shopping list item."""
item = next((itm for itm in self.items if itm['id'] == item_id), None)
if item is None:
raise KeyError
info = ITEM_UPDATE_SCHEMA(info)
item.update(info)
self.hass.async_add_job(self.save)
return item
@callback
def async_clear_completed(self):
"""Clear completed items."""
self.items = [itm for itm in self.items if not itm['complete']]
self.hass.async_add_job(self.save)
@asyncio.coroutine
def async_load(self):
"""Load items."""
def load():
"""Load the items synchronously."""
return load_json(self.hass.config.path(PERSISTENCE), default=[])
self.items = yield from self.hass.async_add_job(load)
def save(self):
"""Save the items."""
save_json(self.hass.config.path(PERSISTENCE), self.items)
class AddItemIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_ADD_ITEM
slot_schema = {
'item': cv.string
}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
slots = self.async_validate_slots(intent_obj.slots)
item = slots['item']['value']
intent_obj.hass.data[DOMAIN].async_add(item)
response = intent_obj.create_response()
response.async_set_speech(
"I've added {} to your shopping list".format(item))
intent_obj.hass.bus.async_fire(EVENT)
return response
class ListTopItemsIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_LAST_ITEMS
slot_schema = {
'item': cv.string
}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
items = intent_obj.hass.data[DOMAIN].items[-5:]
response = intent_obj.create_response()
if not items:
response.async_set_speech(
"There are no items on your shopping list")
else:
response.async_set_speech(
"These are the top {} items on your shopping list: {}".format(
min(len(items), 5),
', '.join(itm['name'] for itm in reversed(items))))
return response
class ShoppingListView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list'
name = "api:shopping_list"
@callback
def get(self, request):
"""Retrieve shopping list items."""
return self.json(request.app['hass'].data[DOMAIN].items)
class UpdateShoppingListItemView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list/item/{item_id}'
name = "api:shopping_list:item:id"
async def post(self, request, item_id):
"""Update a shopping list item."""
data = await request.json()
try:
item = request.app['hass'].data[DOMAIN].async_update(item_id, data)
request.app['hass'].bus.async_fire(EVENT)
return self.json(item)
except KeyError:
return self.json_message('Item not found', HTTP_NOT_FOUND)
except vol.Invalid:
return self.json_message('Item not found', HTTP_BAD_REQUEST)
class CreateShoppingListItemView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list/item'
name = "api:shopping_list:item"
@RequestDataValidator(vol.Schema({
vol.Required('name'): str,
}))
@asyncio.coroutine
def post(self, request, data):
"""Create a new shopping list item."""
item = request.app['hass'].data[DOMAIN].async_add(data['name'])
request.app['hass'].bus.async_fire(EVENT)
return self.json(item)
class ClearCompletedItemsView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list/clear_completed'
name = "api:shopping_list:clear_completed"
@callback
def post(self, request):
"""Retrieve if API is running."""
hass = request.app['hass']
hass.data[DOMAIN].async_clear_completed()
hass.bus.async_fire(EVENT)
return self.json_message('Cleared completed items.')
@callback
def websocket_handle_items(hass, connection, msg):
"""Handle get shopping_list items."""
connection.send_message(websocket_api.result_message(
msg['id'], hass.data[DOMAIN].items))
@callback
def websocket_handle_add(hass, connection, msg):
"""Handle add item to shopping_list."""
item = hass.data[DOMAIN].async_add(msg['name'])
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(
msg['id'], item))
@websocket_api.async_response
async def websocket_handle_update(hass, connection, msg):
"""Handle update shopping_list item."""
msg_id = msg.pop('id')
item_id = msg.pop('item_id')
msg.pop('type')
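# After popping 'id', 'item_id' and 'type', whatever remains ('name' and/or
# 'complete') is the update payload validated by ITEM_UPDATE_SCHEMA in async_update.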
data = msg
try:
item = hass.data[DOMAIN].async_update(item_id, data)
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(
msg_id, item))
except KeyError:
connection.send_message(websocket_api.error_message(
msg_id, 'item_not_found', 'Item not found'))
@callback
def websocket_handle_clear(hass, connection, msg):
"""Handle clearing shopping_list items."""
hass.data[DOMAIN].async_clear_completed()
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(msg['id']))
|
|
import os
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
assert_array_less, assert_almost_equal)
import itertools
import mne
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne import read_trans, write_trans
from mne.io import read_info
from mne.transforms import (invert_transform, _get_trans,
rotation, rotation3d, rotation_angles, _find_trans,
combine_transforms, apply_trans, translation,
get_ras_to_neuromag_trans, _pol_to_cart,
quat_to_rot, rot_to_quat, _angle_between_quats,
_find_vector_rotation, _sph_to_cart, _cart_to_sph,
_topo_to_sph, _average_quats,
_SphericalSurfaceWarp as SphericalSurfaceWarp,
rotation3d_align_z_axis, _read_fs_xfm,
_write_fs_xfm, _quat_real, _fit_matched_points,
_quat_to_euler, _euler_to_quat,
_quat_to_affine, _compute_r2, _validate_pipeline)
from mne.utils import requires_nibabel, requires_dipy
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
fname_eve = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
fname_t1 = op.join(subjects_dir, 'fsaverage', 'mri', 'T1.mgz')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_trans = op.join(base_dir, 'sample-audvis-raw-trans.txt')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_tps():
"""Test TPS warping."""
az = np.linspace(0., 2 * np.pi, 20, endpoint=False)
pol = np.linspace(0, np.pi, 12)[1:-1]
sph = np.array(np.meshgrid(1, az, pol, indexing='ij'))
sph.shape = (3, -1)
assert_equal(sph.shape[1], 200)
source = _sph_to_cart(sph.T)
destination = source.copy()
destination *= 2
destination[:, 0] += 1
# fit with 100 points
warp = SphericalSurfaceWarp()
assert 'no ' in repr(warp)
warp.fit(source[::3], destination[::2])
assert 'oct5' in repr(warp)
destination_est = warp.transform(source)
assert_allclose(destination_est, destination, atol=1e-3)
@testing.requires_testing_data
def test_get_trans():
"""Test converting '-trans.txt' to '-trans.fif'."""
trans = read_trans(fname)
trans = invert_transform(trans) # starts out as head->MRI, so invert
trans_2 = _get_trans(fname_trans)[0]
assert trans.__eq__(trans_2, atol=1e-5)
@testing.requires_testing_data
def test_io_trans(tmp_path):
"""Test reading and writing of trans files."""
tempdir = str(tmp_path)
os.mkdir(op.join(tempdir, 'sample'))
pytest.raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
trans0 = read_trans(fname)
fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
trans0.save(fname1)
assert fname1 == _find_trans('sample', subjects_dir=tempdir)
trans1 = read_trans(fname1)
# check all properties
assert trans0 == trans1
# check reading non -trans.fif files
pytest.raises(IOError, read_trans, fname_eve)
# check warning on bad filenames
fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
with pytest.warns(RuntimeWarning, match='-trans.fif'):
write_trans(fname2, trans0)
def test_get_ras_to_neuromag_trans():
"""Test the coordinate transformation from ras to neuromag."""
# create model points in neuromag-like space
rng = np.random.RandomState(0)
anterior = [0, 1, 0]
left = [-1, 0, 0]
right = [.8, 0, 0]
up = [0, 0, 1]
rand_pts = rng.uniform(-1, 1, (3, 3))
pts = np.vstack((anterior, left, right, up, rand_pts))
# change coord system
rx, ry, rz, tx, ty, tz = rng.uniform(-2 * np.pi, 2 * np.pi, 6)
trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
pts_changed = apply_trans(trans, pts)
# transform back into original space
nas, lpa, rpa = pts_changed[:3]
hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
pts_restored = apply_trans(hsp_trans, pts_changed)
err = "Neuromag transformation failed"
assert_allclose(pts_restored, pts, atol=1e-6, err_msg=err)
def _cartesian_to_sphere(x, y, z):
"""Convert using old function."""
hypotxy = np.hypot(x, y)
r = np.hypot(hypotxy, z)
elev = np.arctan2(z, hypotxy)
az = np.arctan2(y, x)
return az, elev, r
def _sphere_to_cartesian(theta, phi, r):
"""Convert using old function."""
z = r * np.sin(phi)
rcos_phi = r * np.cos(phi)
x = rcos_phi * np.cos(theta)
y = rcos_phi * np.sin(theta)
return x, y, z
def test_sph_to_cart():
"""Test conversion between sphere and cartesian."""
# Simple test, expected value (11, 0, 0)
r, theta, phi = 11., 0., np.pi / 2.
z = r * np.cos(phi)
rsin_phi = r * np.sin(phi)
x = rsin_phi * np.cos(theta)
y = rsin_phi * np.sin(theta)
coord = _sph_to_cart(np.array([[r, theta, phi]]))[0]
assert_allclose(coord, (x, y, z), atol=1e-7)
assert_allclose(coord, (r, 0, 0), atol=1e-7)
rng = np.random.RandomState(0)
# round-trip test
coords = rng.randn(10, 3)
assert_allclose(_sph_to_cart(_cart_to_sph(coords)), coords, atol=1e-5)
# equivalence tests to old versions
for coord in coords:
sph = _cart_to_sph(coord[np.newaxis])
cart = _sph_to_cart(sph)
sph_old = np.array(_cartesian_to_sphere(*coord))
cart_old = _sphere_to_cartesian(*sph_old)
sph_old[1] = np.pi / 2. - sph_old[1] # new convention
assert_allclose(sph[0], sph_old[[2, 0, 1]], atol=1e-7)
assert_allclose(cart[0], cart_old, atol=1e-7)
assert_allclose(cart[0], coord, atol=1e-7)
def _polar_to_cartesian(theta, r):
"""Transform polar coordinates to cartesian."""
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
def test_polar_to_cartesian():
"""Test helper transform function from polar to cartesian."""
r = 1
theta = np.pi
# expected values are (-1, 0)
x = r * np.cos(theta)
y = r * np.sin(theta)
coord = _pol_to_cart(np.array([[r, theta]]))[0]
# np.pi is an approx since pi is irrational
assert_allclose(coord, (x, y), atol=1e-7)
assert_allclose(coord, (-1, 0), atol=1e-7)
assert_allclose(coord, _polar_to_cartesian(theta, r), atol=1e-7)
rng = np.random.RandomState(0)
r = rng.randn(10)
theta = rng.rand(10) * (2 * np.pi)
polar = np.array((r, theta)).T
assert_allclose([_polar_to_cartesian(p[1], p[0]) for p in polar],
_pol_to_cart(polar), atol=1e-7)
def _topo_to_phi_theta(theta, radius):
"""Convert using old function."""
sph_phi = (0.5 - radius) * 180
sph_theta = -theta
return sph_phi, sph_theta
def test_topo_to_sph():
"""Test topo to sphere conversion."""
rng = np.random.RandomState(0)
angles = rng.rand(10) * 360
radii = rng.rand(10)
angles[0] = 30
radii[0] = 0.25
# new way
sph = _topo_to_sph(np.array([angles, radii]).T)
new = _sph_to_cart(sph)
new[:, [0, 1]] = new[:, [1, 0]] * [-1, 1]
# old way
for ii, (angle, radius) in enumerate(zip(angles, radii)):
sph_phi, sph_theta = _topo_to_phi_theta(angle, radius)
if ii == 0:
assert_allclose(_topo_to_phi_theta(angle, radius), [45, -30])
azimuth = sph_theta / 180.0 * np.pi
elevation = sph_phi / 180.0 * np.pi
assert_allclose(sph[ii], [1., azimuth, np.pi / 2. - elevation],
atol=1e-7)
r = np.ones_like(radius)
x, y, z = _sphere_to_cartesian(azimuth, elevation, r)
pos = [-y, x, z]
if ii == 0:
expected = np.array([1. / 2., np.sqrt(3) / 2., 1.])
expected /= np.sqrt(2)
assert_allclose(pos, expected, atol=1e-7)
assert_allclose(pos, new[ii], atol=1e-7)
def test_rotation():
"""Test conversion between rotation angles and transformation matrix."""
tests = [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]
for rot in tests:
x, y, z = rot
m = rotation3d(x, y, z)
m4 = rotation(x, y, z)
assert_array_equal(m, m4[:3, :3])
back = rotation_angles(m)
assert_almost_equal(actual=back, desired=rot, decimal=12)
back4 = rotation_angles(m4)
assert_almost_equal(actual=back4, desired=rot, decimal=12)
def test_rotation3d_align_z_axis():
"""Test rotation3d_align_z_axis."""
# The more complex z axis fails the assert presumably due to tolerance
#
inp_zs = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1],
[-0.75071668, -0.62183808, 0.22302888]]
exp_res = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
[[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]],
[[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]],
[[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
[[0.53919688, -0.38169517, -0.75071668],
[-0.38169517, 0.683832, -0.62183808],
[0.75071668, 0.62183808, 0.22302888]]]
for res, z in zip(exp_res, inp_zs):
assert_allclose(res, rotation3d_align_z_axis(z), atol=1e-7)
@testing.requires_testing_data
def test_combine():
"""Test combining transforms."""
trans = read_trans(fname)
inv = invert_transform(trans)
combine_transforms(trans, inv, trans['from'], trans['from'])
pytest.raises(RuntimeError, combine_transforms, trans, inv,
trans['to'], trans['from'])
pytest.raises(RuntimeError, combine_transforms, trans, inv,
trans['from'], trans['to'])
pytest.raises(RuntimeError, combine_transforms, trans, trans,
trans['from'], trans['to'])
def test_quaternions():
"""Test quaternion calculations."""
rots = [np.eye(3)]
for fname in [test_fif_fname, ctf_fname, hp_fif_fname]:
rots += [read_info(fname)['dev_head_t']['trans'][:3, :3]]
# nasty numerical cases
rots += [np.array([
[-0.99978541, -0.01873462, -0.00898756],
[-0.01873462, 0.62565561, 0.77987608],
[-0.00898756, 0.77987608, -0.62587152],
])]
rots += [np.array([
[0.62565561, -0.01873462, 0.77987608],
[-0.01873462, -0.99978541, -0.00898756],
[0.77987608, -0.00898756, -0.62587152],
])]
rots += [np.array([
[-0.99978541, -0.00898756, -0.01873462],
[-0.00898756, -0.62587152, 0.77987608],
[-0.01873462, 0.77987608, 0.62565561],
])]
for rot in rots:
assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
rtol=1e-5, atol=1e-5)
rot = rot[np.newaxis, np.newaxis, :, :]
assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
rtol=1e-5, atol=1e-5)
# let's make sure our angle function works in some reasonable way
for ii in range(3):
for jj in range(3):
a = np.zeros(3)
b = np.zeros(3)
a[ii] = 1.
b[jj] = 1.
expected = np.pi if ii != jj else 0.
assert_allclose(_angle_between_quats(a, b), expected, atol=1e-5)
y_180 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1.]])
assert_allclose(_angle_between_quats(rot_to_quat(y_180),
np.zeros(3)), np.pi)
h_180_attitude_90 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1.]])
assert_allclose(_angle_between_quats(rot_to_quat(h_180_attitude_90),
np.zeros(3)), np.pi)
def test_vector_rotation():
"""Test basic rotation matrix math."""
x = np.array([1., 0., 0.])
y = np.array([0., 1., 0.])
rot = _find_vector_rotation(x, y)
assert_array_equal(rot,
[[0, -1, 0], [1, 0, 0], [0, 0, 1]])
quat_1 = rot_to_quat(rot)
quat_2 = rot_to_quat(np.eye(3))
assert_allclose(_angle_between_quats(quat_1, quat_2), np.pi / 2.)
def test_average_quats():
"""Test averaging of quaternions."""
sq2 = 1. / np.sqrt(2.)
quats = np.array([[0, sq2, sq2],
[0, sq2, sq2],
[0, sq2, 0],
[0, 0, sq2],
[sq2, 0, 0]], float)
# In MATLAB:
# quats = [[0, sq2, sq2, 0]; [0, sq2, sq2, 0];
# [0, sq2, 0, sq2]; [0, 0, sq2, sq2]; [sq2, 0, 0, sq2]];
expected = [quats[0],
quats[0],
[0, 0.788675134594813, 0.577350269189626],
[0, 0.657192299694123, 0.657192299694123],
[0.100406058540540, 0.616329446922803, 0.616329446922803]]
# Averaging the first two should give the same thing:
for lim, ex in enumerate(expected):
assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
quats[1] *= -1 # same quaternion (hidden value is zero here)!
rot_0, rot_1 = quat_to_rot(quats[:2])
assert_allclose(rot_0, rot_1, atol=1e-7)
for lim, ex in enumerate(expected):
assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
# Assert some symmetry
count = 0
extras = [[sq2, sq2, 0]] + list(np.eye(3))
for quat in np.concatenate((quats, expected, extras)):
if np.isclose(_quat_real(quat), 0., atol=1e-7): # can flip sign
count += 1
angle = _angle_between_quats(quat, -quat)
assert_allclose(angle, 0., atol=1e-7)
rot_0, rot_1 = quat_to_rot(np.array((quat, -quat)))
assert_allclose(rot_0, rot_1, atol=1e-7)
assert count == 4 + len(extras)
@testing.requires_testing_data
@pytest.mark.parametrize('subject', ('fsaverage', 'sample'))
def test_fs_xfm(subject, tmp_path):
"""Test reading and writing of Freesurfer transforms."""
fname = op.join(data_path, 'subjects', subject, 'mri', 'transforms',
'talairach.xfm')
xfm, kind = _read_fs_xfm(fname)
if subject == 'fsaverage':
assert_allclose(xfm, np.eye(4), atol=1e-5) # fsaverage is in MNI
assert kind == 'MNI Transform File'
tempdir = str(tmp_path)
fname_out = op.join(tempdir, 'out.xfm')
_write_fs_xfm(fname_out, xfm, kind)
xfm_read, kind_read = _read_fs_xfm(fname_out)
assert kind_read == kind
assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
# Some wacky one
xfm[:3] = np.random.RandomState(0).randn(3, 4)
_write_fs_xfm(fname_out, xfm, 'foo')
xfm_read, kind_read = _read_fs_xfm(fname_out)
assert kind_read == 'foo'
assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
# degenerate conditions
with open(fname_out, 'w') as fid:
fid.write('foo')
with pytest.raises(ValueError, match='Failed to find'):
_read_fs_xfm(fname_out)
_write_fs_xfm(fname_out, xfm[:2], 'foo')
with pytest.raises(ValueError, match='Could not find'):
_read_fs_xfm(fname_out)
@pytest.fixture()
def quats():
"""Make some unit quats."""
quats = np.random.RandomState(0).randn(5, 3)
quats[:, 0] = 0 # identity
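# These 3-vectors are the (x, y, z) part of unit quaternions; keeping the norm at 0.5
# guarantees a nonzero real part w = sqrt(1 - x**2 - y**2 - z**2).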
quats /= 2 * np.linalg.norm(quats, axis=1, keepdims=True) # some real part
return quats
def _check_fit_matched_points(
p, x, weights, do_scale, angtol=1e-5, dtol=1e-5, stol=1e-7):
__tracebackhide__ = True
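# Temporarily disable the analytical solver so coreg.fit_matched_points takes its
# generic numerical path; the result is then compared against the analytical
# _fit_matched_points fit below.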
mne.coreg._ALLOW_ANALITICAL = False
try:
params = mne.coreg.fit_matched_points(
p, x, weights=weights, scale=do_scale, out='params')
finally:
mne.coreg._ALLOW_ANALITICAL = True
quat_an, scale_an = _fit_matched_points(p, x, weights, scale=do_scale)
assert len(params) == 6 + int(do_scale)
q_co = _euler_to_quat(params[:3])
translate_co = params[3:6]
angle = np.rad2deg(_angle_between_quats(quat_an[:3], q_co))
dist = np.linalg.norm(quat_an[3:] - translate_co)
assert 0 <= angle < angtol, 'angle'
assert 0 <= dist < dtol, 'dist'
if do_scale:
scale_co = params[6]
assert_allclose(scale_an, scale_co, rtol=stol, err_msg='scale')
# errs
trans = _quat_to_affine(quat_an)
trans[:3, :3] *= scale_an
weights = np.ones(1) if weights is None else weights
err_an = np.linalg.norm(
weights[:, np.newaxis] * apply_trans(trans, p) - x)
trans = mne.coreg._trans_from_params((True, True, do_scale), params)
err_co = np.linalg.norm(
weights[:, np.newaxis] * apply_trans(trans, p) - x)
if err_an > 1e-14:
assert err_an < err_co * 1.5
return quat_an, scale_an
@pytest.mark.parametrize('scaling', [0.25, 1])
@pytest.mark.parametrize('do_scale', (True, False))
def test_fit_matched_points(quats, scaling, do_scale):
"""Test analytical least-squares matched point fitting."""
if scaling != 1 and not do_scale:
return # no need to test this, it will not be good
rng = np.random.RandomState(0)
fro = rng.randn(10, 3)
translation = rng.randn(3)
for qi, quat in enumerate(quats):
to = scaling * np.dot(quat_to_rot(quat), fro.T).T + translation
for corrupted in (False, True):
# mess up a point
if corrupted:
to[0, 2] += 100
weights = np.ones(len(to))
weights[0] = 0
else:
weights = None
est, scale_est = _check_fit_matched_points(
fro, to, weights=weights, do_scale=do_scale)
assert_allclose(scale_est, scaling, rtol=1e-5)
assert_allclose(est[:3], quat, atol=1e-14)
assert_allclose(est[3:], translation, atol=1e-14)
# if we don't adjust for the corruption above, it should get worse
angle = dist = None
for weighted in (False, True):
if not weighted:
weights = None
dist_bounds = (5, 20)
if scaling == 1:
angle_bounds = (5, 95)
angtol, dtol, stol = 1, 15, 3
else:
angle_bounds = (5, 105)
angtol, dtol, stol = 20, 15, 3
else:
weights = np.ones(len(to))
weights[0] = 10 # weighted=True here means "make it worse"
angle_bounds = (angle, 180) # unweighted values as new min
dist_bounds = (dist, 100)
if scaling == 1:
# XXX this angtol is not great but there is a hard to
# identify linalg/angle calculation bug on Travis...
angtol, dtol, stol = 180, 70, 3
else:
angtol, dtol, stol = 50, 70, 3
est, scale_est = _check_fit_matched_points(
fro, to, weights=weights, do_scale=do_scale,
angtol=angtol, dtol=dtol, stol=stol)
assert not np.allclose(est[:3], quat, atol=1e-5)
assert not np.allclose(est[3:], translation, atol=1e-5)
angle = np.rad2deg(_angle_between_quats(est[:3], quat))
assert_array_less(angle_bounds[0], angle)
assert_array_less(angle, angle_bounds[1])
dist = np.linalg.norm(est[3:] - translation)
assert_array_less(dist_bounds[0], dist)
assert_array_less(dist, dist_bounds[1])
def test_euler(quats):
"""Test euler transformations."""
euler = _quat_to_euler(quats)
quats_2 = _euler_to_quat(euler)
assert_allclose(quats, quats_2, atol=1e-14)
quat_rot = quat_to_rot(quats)
euler_rot = np.array([rotation(*e)[:3, :3] for e in euler])
assert_allclose(quat_rot, euler_rot, atol=1e-14)
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_registration():
"""Test volume registration."""
import nibabel as nib
from dipy.align import resample
T1 = nib.load(fname_t1)
affine = np.eye(4)
affine[0, 3] = 10
T1_resampled = resample(moving=T1.get_fdata(),
static=T1.get_fdata(),
moving_affine=T1.affine,
static_affine=T1.affine,
between_affine=np.linalg.inv(affine))
for pipeline, cval in zip(('rigids', ('translation', 'sdr')), (0., '1%')):
reg_affine, sdr_morph = mne.transforms.compute_volume_registration(
T1_resampled, T1, pipeline=pipeline, zooms=10, niter=[5])
assert_allclose(affine, reg_affine, atol=0.01)
T1_aligned = mne.transforms.apply_volume_registration(
T1_resampled, T1, reg_affine, sdr_morph, cval=cval)
r2 = _compute_r2(_get_img_fdata(T1_aligned), _get_img_fdata(T1))
assert 99.9 < r2
with pytest.raises(ValueError, match='cval'):
mne.transforms.apply_volume_registration(
T1_resampled, T1, reg_affine, sdr_morph, cval='bad')
# check that all orders of the pipeline work
for pipeline_len in range(1, 5):
for pipeline in itertools.combinations(
('translation', 'rigid', 'affine', 'sdr'), pipeline_len):
_validate_pipeline(pipeline)
_validate_pipeline(list(pipeline))
with pytest.raises(ValueError, match='Steps in pipeline are out of order'):
_validate_pipeline(('sdr', 'affine'))
with pytest.raises(ValueError,
match='Steps in pipeline should not be repeated'):
_validate_pipeline(('affine', 'affine'))
|
|
"""
Reimplementations of constructs introduced in later versions of Python than
we support. Also some functions that are needed SymPy-wide and are located
here for easy import.
"""
from __future__ import print_function, division
import operator
from collections import defaultdict
from sympy.external import import_module
"""
Python 2 and Python 3 compatible imports
String and Unicode compatible changes:
* `unicode()` removed in Python 3, import `unicode` for Python 2/3
compatible function
* `unichr()` removed in Python 3, import `unichr` for Python 2/3 compatible
function
* Use `u()` for escaped unicode sequences (e.g. u'\u2020' -> u('\u2020'))
* Use `u_decode()` to decode utf-8 formatted unicode strings
* `string_types` gives str in Python 3, unicode and str in Python 2,
equivalent to basestring
Integer related changes:
* `long()` removed in Python 3, import `long` for Python 2/3 compatible
function
* `integer_types` gives int in Python 3, int and long in Python 2
Types related changes:
* `class_types` gives type in Python 3, type and ClassType in Python 2
Renamed function attributes:
* Python 2 `.func_code`, Python 3 `.__func__`, access with
`get_function_code()`
* Python 2 `.func_globals`, Python 3 `.__globals__`, access with
`get_function_globals()`
* Python 2 `.func_name`, Python 3 `.__name__`, access with
`get_function_name()`
Moved modules:
* `reduce()`
* `StringIO()`
* `cStringIO()` (same as `StringIO()` in Python 3)
* Python 2 `__builtins__`, access with Python 3 name, `builtins`
Iterator/list changes:
* `xrange` removed in Python 3, import `xrange` for Python 2/3 compatible
iterator version of range
exec:
* Use `exec_()`, with parameters `exec_(code, globs=None, locs=None)`
Metaclasses:
* Use `with_metaclass()`, examples below
* Define class `Foo` with metaclass `Meta`, and no parent:
class Foo(with_metaclass(Meta)):
pass
* Define class `Foo` with metaclass `Meta` and parent class `Bar`:
class Foo(with_metaclass(Meta, Bar)):
pass
"""
import sys
PY3 = sys.version_info[0] > 2
if PY3:
class_types = type,
integer_types = (int,)
string_types = (str,)
long = int
# String / unicode compatibility
unicode = str
unichr = chr
def u(x):
return x
def u_decode(x):
return x
Iterator = object
# Moved definitions
get_function_code = operator.attrgetter("__code__")
get_function_globals = operator.attrgetter("__globals__")
get_function_name = operator.attrgetter("__name__")
import builtins
from functools import reduce
from io import StringIO
cStringIO = StringIO
exec_ = getattr(builtins, "exec")
xrange = range
else:
import codecs
import types
class_types = (type, types.ClassType)
integer_types = (int, long)
string_types = (str, unicode)
long = long
# String / unicode compatibility
unicode = unicode
unichr = unichr
def u(x):
return codecs.unicode_escape_decode(x)[0]
def u_decode(x):
return x.decode('utf-8')
class Iterator(object):
def next(self):
return type(self).__next__(self)
# Moved definitions
get_function_code = operator.attrgetter("func_code")
get_function_globals = operator.attrgetter("func_globals")
get_function_name = operator.attrgetter("func_name")
import __builtin__ as builtins
reduce = reduce
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
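# The Python-2-only "exec code in globs, locs" statement is wrapped in a string so
# that Python 3's parser does not reject this module at compile time.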
exec("exec _code_ in _globs_, _locs_")
xrange = xrange
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from sympy.core.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("NewBase", None, {})
# These are in here because telling if something is an iterable just by calling
# hasattr(obj, "__iter__") behaves differently in Python 2 and Python 3. In
# particular, hasattr(str, "__iter__") is False in Python 2 and True in Python 3.
# I think putting them here also makes it easier to use them in the core.
class NotIterable:
"""
Use this as mixin when creating a class which is not supposed to return
true when iterable() is called on its instances. I.e. avoid infinite loop
when calling e.g. list() on the instance
"""
pass
def iterable(i, exclude=(string_types, dict, NotIterable)):
"""
Return a boolean indicating whether ``i`` is SymPy iterable.
True also indicates that the iterator is finite, i.e. you can, for
example, call list(...) on the instance.
When SymPy is working with iterables, it is almost always assuming
that the iterable is not a string or a mapping, so those are excluded
by default. If you want a pure Python definition, make exclude=None. To
exclude multiple items, pass them as a tuple.
See also: is_sequence
Examples
========
>>> from sympy.utilities.iterables import iterable
>>> from sympy import Tuple
>>> things = [[1], (1,), set([1]), Tuple(1), (j for j in [1, 2]), {1:2}, '1', 1]
>>> for i in things:
... print('%s %s' % (iterable(i), type(i)))
True <... 'list'>
True <... 'tuple'>
True <... 'set'>
True <class 'sympy.core.containers.Tuple'>
True <... 'generator'>
False <... 'dict'>
False <... 'str'>
False <... 'int'>
>>> iterable({}, exclude=None)
True
>>> iterable({}, exclude=str)
True
>>> iterable("no", exclude=str)
False
"""
try:
iter(i)
except TypeError:
return False
if exclude:
return not isinstance(i, exclude)
return True
def is_sequence(i, include=None):
"""
Return a boolean indicating whether ``i`` is a sequence in the SymPy
sense. If anything that fails the test below should be included as
being a sequence for your application, set 'include' to that object's
type; multiple types should be passed as a tuple of types.
Note: although generators can generate a sequence, they often need special
handling to make sure their elements are captured before the generator is
exhausted, so these are not included by default in the definition of a
sequence.
See also: iterable
Examples
========
>>> from sympy.utilities.iterables import is_sequence
>>> from types import GeneratorType
>>> is_sequence([])
True
>>> is_sequence(set())
False
>>> is_sequence('abc')
False
>>> is_sequence('abc', include=str)
True
>>> generator = (c for c in 'abc')
>>> is_sequence(generator)
False
>>> is_sequence(generator, include=(str, GeneratorType))
True
"""
return (hasattr(i, '__getitem__') and
iterable(i) or
bool(include) and
isinstance(i, include))
try:
from functools import cmp_to_key
except ImportError: # <= Python 2.6
def cmp_to_key(mycmp):
"""
Convert a cmp= function into a key= function
"""
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
try:
from itertools import zip_longest
except ImportError: # <= Python 2.7
from itertools import izip_longest as zip_longest
try:
from itertools import combinations_with_replacement
except ImportError: # <= Python 2.6
def combinations_with_replacement(iterable, r):
"""Return r length subsequences of elements from the input iterable
allowing individual elements to be repeated more than once.
Combinations are emitted in lexicographic sort order. So, if the
input iterable is sorted, the combination tuples will be produced
in sorted order.
Elements are treated as unique based on their position, not on their
value. So if the input elements are unique, the generated combinations
will also be unique.
See also: combinations
Examples
========
>>> from sympy.core.compatibility import combinations_with_replacement
>>> list(combinations_with_replacement('AB', 2))
[('A', 'A'), ('A', 'B'), ('B', 'B')]
"""
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
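# Usage sketch (illustrative, not part of the original file): whichever
# binding of combinations_with_replacement is active, it yields r-length
# multisets in lexicographic order, and an empty pool with r > 0 yields
# nothing at all.
def _cwr_demo():
    assert list(combinations_with_replacement("AB", 2)) == [
        ("A", "A"), ("A", "B"), ("B", "B")]
    assert list(combinations_with_replacement("", 2)) == []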
def as_int(n):
"""
Convert the argument to a builtin integer.
The return value is guaranteed to be equal to the input. ValueError is
raised if the input has a non-integral value.
Examples
========
>>> from sympy.core.compatibility import as_int
>>> from sympy import sqrt
>>> 3.0
3.0
>>> as_int(3.0) # convert to int and test for equality
3
>>> int(sqrt(10))
3
>>> as_int(sqrt(10))
Traceback (most recent call last):
...
ValueError: ... is not an integer
"""
try:
result = int(n)
if result != n:
raise TypeError
except TypeError:
raise ValueError('%s is not an integer' % n)
return result
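# Minimal sketch of the contract (illustrative only): the converted value must
# compare equal to the input, otherwise ValueError is raised.
def _as_int_demo():
    assert as_int(3.0) == 3
    try:
        as_int(3.5)
    except ValueError:
        pass
    else:
        raise AssertionError("as_int(3.5) should have raised ValueError")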
def default_sort_key(item, order=None):
"""Return a key that can be used for sorting.
The key has the structure:
(class_key, (len(args), args), exponent.sort_key(), coefficient)
This key is supplied by the sort_key routine of Basic objects when
``item`` is a Basic object or an object (other than a string) that
sympifies to a Basic object. Otherwise, this function produces the
key.
The ``order`` argument is passed along to the sort_key routine and is
used to determine how the terms *within* an expression are ordered.
(See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
and reversed values of the same (e.g. 'rev-lex'). The default order
value is None (which translates to 'lex').
Examples
========
>>> from sympy import S, I, default_sort_key
>>> from sympy.core.function import UndefinedFunction
>>> from sympy.abc import x
The following are equivalent ways of getting the key for an object:
>>> x.sort_key() == default_sort_key(x)
True
Here are some examples of the key that is produced:
>>> default_sort_key(UndefinedFunction('f'))
((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'),
(0, ()), (), 1), 1)
>>> default_sort_key('1')
((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)
>>> default_sort_key(S.One)
((1, 0, 'Number'), (0, ()), (), 1)
>>> default_sort_key(2)
((1, 0, 'Number'), (0, ()), (), 2)
While sort_key is a method only defined for SymPy objects,
default_sort_key will accept anything as an argument so it is
more robust as a sorting key. For the following, using key=
lambda i: i.sort_key() would fail because 2 doesn't have a sort_key
method; that's why default_sort_key is used. Note that it also
handles sympification of non-string items like ints:
>>> a = [2, I, -I]
>>> sorted(a, key=default_sort_key)
[2, -I, I]
The returned key can be used anywhere that a key can be specified for
a function, e.g. sort, min, max, etc...:
>>> a.sort(key=default_sort_key); a[0]
2
>>> min(a, key=default_sort_key)
2
Note
----
The key returned is useful for getting items into a canonical order
that will be the same across platforms. It is not directly useful for
sorting lists of expressions:
>>> a, b = x, 1/x
Since ``a`` has only 1 term, its value of sort_key is unaffected by
``order``:
>>> a.sort_key() == a.sort_key('rev-lex')
True
If ``a`` and ``b`` are combined then the key will differ because there
are terms that can be ordered:
>>> eq = a + b
>>> eq.sort_key() == eq.sort_key('rev-lex')
False
>>> eq.as_ordered_terms()
[x, 1/x]
>>> eq.as_ordered_terms('rev-lex')
[1/x, x]
But since the keys for each of these terms are independent of ``order``'s
value, they don't sort differently when they appear separately in a list:
>>> sorted(eq.args, key=default_sort_key)
[1/x, x]
>>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
[1/x, x]
The order of terms obtained when using these keys is the order that would
be obtained if those terms were *factors* in a product.
See Also
========
sympy.core.expr.as_ordered_factors, sympy.core.expr.as_ordered_terms
"""
from sympy.core import S, Basic
from sympy.core.sympify import sympify, SympifyError
from sympy.core.compatibility import iterable
if isinstance(item, Basic):
return item.sort_key(order=order)
if iterable(item, exclude=string_types):
if isinstance(item, dict):
args = item.items()
unordered = True
elif isinstance(item, set):
args = item
unordered = True
else:
# e.g. tuple, list
args = list(item)
unordered = False
args = [default_sort_key(arg, order=order) for arg in args]
if unordered:
# e.g. dict, set
args = sorted(args)
cls_index, args = 10, (len(args), tuple(args))
else:
if not isinstance(item, string_types):
try:
item = sympify(item)
except SympifyError:
# e.g. lambda x: x
pass
else:
if isinstance(item, Basic):
# e.g. int -> Integer
return default_sort_key(item)
# e.g. UndefinedFunction
# e.g. str
cls_index, args = 0, (1, (str(item),))
return (cls_index, 0, item.__class__.__name__
), args, S.One.sort_key(), S.One
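# Usage sketch (assumes SymPy itself is importable, just as the function's own
# imports above do): default_sort_key gives a stable, cross-platform ordering
# for mixed SymPy / plain-Python values, mirroring the docstring example.
def _default_sort_key_demo():
    from sympy import I
    vals = [2, I, -I]
    assert sorted(vals, key=default_sort_key) == [2, -I, I]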
def _nodes(e):
"""
A helper for ordered() which returns the node count of ``e``: for a
Basic object this is the number of Basic nodes in the expression tree,
while for any other object it is 1 (unless the object is an iterable or
dict, for which the sum of nodes is returned).
"""
from .basic import Basic
if isinstance(e, Basic):
return e.count(Basic)
elif iterable(e):
return 1 + sum(_nodes(ei) for ei in e)
elif isinstance(e, dict):
return 1 + sum(_nodes(k) + _nodes(v) for k, v in e.items())
else:
return 1
def ordered(seq, keys=None, default=True, warn=False):
"""Return an iterator of the seq where keys are used to break ties.
Two default keys will be applied after and provided unless ``default``
is False. The two keys are _nodes and default_sort_key which will
place smaller expressions before larger ones (in terms of Basic nodes)
and where there are ties, they will be broken by the default_sort_key.
If ``warn`` is True then an error will be raised if there were no
keys remaining to break ties. This can be used if it was expected that
there should be no ties.
Examples
========
>>> from sympy.utilities.iterables import ordered
>>> from sympy import count_ops
>>> from sympy.abc import x, y
The count_ops is not sufficient to break ties in this list and the first
two items appear in their original order (i.e. the sorting is stable):
>>> list(ordered([y + 2, x + 2, x**2 + y + 3],
... count_ops, default=False, warn=False))
...
[y + 2, x + 2, x**2 + y + 3]
The default_sort_key allows the tie to be broken:
>>> list(ordered([y + 2, x + 2, x**2 + y + 3]))
...
[x + 2, y + 2, x**2 + y + 3]
Here, sequences are sorted by length, then sum:
>>> seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]], [
... lambda x: len(x),
... lambda x: sum(x)]]
...
>>> list(ordered(seq, keys, default=False, warn=False))
[[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
If ``warn`` is True, an error will be raised if there were not
enough keys to break ties:
>>> list(ordered(seq, keys, default=False, warn=True))
Traceback (most recent call last):
...
ValueError: not enough keys to break ties
Notes
=====
The decorated sort is one of the fastest ways to sort a sequence for
which special item comparison is desired: the sequence is decorated,
sorted on the basis of the decoration (e.g. making all letters lower
case) and then undecorated. If one wants to break ties for items that
have the same decorated value, a second key can be used. But if the
second key is expensive to compute then it is inefficient to decorate
all items with both keys: only those items having identical first key
values need to be decorated. This function applies keys successively
only when needed to break ties. By yielding an iterator, use of the
tie-breaker is delayed as long as possible.
This function is best used in cases when use of the first key is
expected to be a good hashing function; if there are no unique hashes
from application of a key then that key should not have been used. The
exception, however, is that even if there are many collisions, if the
first group is small and one does not need to process all items in the
list then time will not be wasted sorting what one was not interested
in. For example, if one were looking for the minimum in a list and
there were several criteria used to define the sort order, then this
function would be good at returning that quickly if the first group
of candidates is small relative to the number of items being processed.
"""
d = defaultdict(list)
if keys:
if not isinstance(keys, (list, tuple)):
keys = [keys]
keys = list(keys)
f = keys.pop(0)
for a in seq:
d[f(a)].append(a)
else:
if not default:
raise ValueError('if default=False then keys must be provided')
d[None].extend(seq)
for k in sorted(d.keys()):
if len(d[k]) > 1:
if keys:
d[k] = ordered(d[k], keys, default, warn)
elif default:
d[k] = ordered(d[k], (_nodes, default_sort_key,),
default=False, warn=warn)
elif warn:
raise ValueError('not enough keys to break ties')
for v in d[k]:
yield v
d.pop(k)
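# Sketch of the tie-breaking behaviour (illustrative only, mirroring the
# docstring): the first key groups items, and later keys are only computed
# for groups that actually tie.
def _ordered_demo():
    seq = [[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]]
    keys = [len, sum]  # sort by length, then by sum where lengths tie
    assert list(ordered(seq, keys, default=False, warn=False)) == [
        [1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]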
# If HAS_GMPY is 0, no supported version of gmpy is available. Otherwise,
# HAS_GMPY contains the major version number of gmpy; i.e. 1 for gmpy, and
# 2 for gmpy2.
# Versions of gmpy prior to 1.03 do not work correctly with int(largempz)
# For example, int(gmpy.mpz(2**256)) would raise OverflowError.
# See issue 4980.
# Minimum version of gmpy changed to 1.13 to allow a single code base to also
# work with gmpy2.
def _getenv(key, default=None):
from os import getenv
return getenv(key, default)
GROUND_TYPES = _getenv('SYMPY_GROUND_TYPES', 'auto').lower()
HAS_GMPY = 0
if GROUND_TYPES != 'python':
# Don't try to import gmpy2 if ground types is set to gmpy1. This is
# primarily intended for testing.
if GROUND_TYPES != 'gmpy1':
gmpy = import_module('gmpy2', min_module_version='2.0.0',
module_version_attr='version', module_version_attr_call_args=())
if gmpy:
HAS_GMPY = 2
else:
GROUND_TYPES = 'gmpy'
if not HAS_GMPY:
gmpy = import_module('gmpy', min_module_version='1.13',
module_version_attr='version', module_version_attr_call_args=())
if gmpy:
HAS_GMPY = 1
if GROUND_TYPES == 'auto':
if HAS_GMPY:
GROUND_TYPES = 'gmpy'
else:
GROUND_TYPES = 'python'
if GROUND_TYPES == 'gmpy' and not HAS_GMPY:
from warnings import warn
warn("gmpy library is not installed, switching to 'python' ground types")
GROUND_TYPES = 'python'
# SYMPY_INTS is a tuple containing the base types for valid integer types.
SYMPY_INTS = integer_types
if GROUND_TYPES == 'gmpy':
SYMPY_INTS += (type(gmpy.mpz(0)),)
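# Illustrative sketch: SYMPY_INTS exists so isinstance checks accept every
# integer type that is valid for the selected ground types -- plain ints
# always, gmpy/gmpy2 mpz only when gmpy was detected above. The helper name
# _is_supported_int is ours.
def _is_supported_int(value):
    return isinstance(value, SYMPY_INTS)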
# check_output() is new in Python 2.7
import os
try:
try:
from subprocess import check_output
except ImportError: # <= Python 2.6
from subprocess import CalledProcessError, check_call
def check_output(*args, **kwargs):
with open(os.devnull, 'w') as fh:
kwargs['stdout'] = fh
try:
return check_call(*args, **kwargs)
except CalledProcessError as e:
e.output = ("program output is not available for Python 2.6.x")
raise e
except ImportError:
# running on platform like App Engine, no subprocess at all
pass
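# Usage sketch (illustrative; assumes a POSIX 'echo' binary is available):
# whichever binding of check_output survived the imports above, it is called
# like subprocess.check_output. Note that the Python 2.6 fallback returns the
# exit status rather than the captured output, as its comment explains.
def _check_output_demo():
    try:
        return check_output(["echo", "hello"])
    except NameError:
        # subprocess was unavailable altogether (e.g. App Engine).
        return None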
|
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1beta1TritonSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'protocol_version': 'str',
'readiness_probe': 'V1Probe',
'resources': 'V1ResourceRequirements',
'runtime_version': 'str',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'storage_uri': 'str',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'protocol_version': 'protocolVersion',
'readiness_probe': 'readinessProbe',
'resources': 'resources',
'runtime_version': 'runtimeVersion',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'storage_uri': 'storageUri',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, protocol_version=None, readiness_probe=None, resources=None, runtime_version=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, storage_uri=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1beta1TritonSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._protocol_version = None
self._readiness_probe = None
self._resources = None
self._runtime_version = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._storage_uri = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
if name is not None:
self.name = name
if ports is not None:
self.ports = ports
if protocol_version is not None:
self.protocol_version = protocol_version
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resources is not None:
self.resources = resources
if runtime_version is not None:
self.runtime_version = runtime_version
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if storage_uri is not None:
self.storage_uri = storage_uri
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1beta1TritonSpec. # noqa: E501
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1beta1TritonSpec. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1beta1TritonSpec.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1beta1TritonSpec. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1beta1TritonSpec. # noqa: E501
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1beta1TritonSpec. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1beta1TritonSpec.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1beta1TritonSpec. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1beta1TritonSpec. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1beta1TritonSpec. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1beta1TritonSpec.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1beta1TritonSpec. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1beta1TritonSpec. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1beta1TritonSpec. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1beta1TritonSpec.
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1beta1TritonSpec. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1beta1TritonSpec. # noqa: E501
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:return: The image of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1beta1TritonSpec.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:param image: The image of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1beta1TritonSpec. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:return: The image_pull_policy of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1beta1TritonSpec.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1beta1TritonSpec. # noqa: E501
:return: The lifecycle of this V1beta1TritonSpec. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1beta1TritonSpec.
:param lifecycle: The lifecycle of this V1beta1TritonSpec. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1beta1TritonSpec. # noqa: E501
:return: The liveness_probe of this V1beta1TritonSpec. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1beta1TritonSpec.
:param liveness_probe: The liveness_probe of this V1beta1TritonSpec. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def name(self):
"""Gets the name of this V1beta1TritonSpec. # noqa: E501
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:return: The name of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1TritonSpec.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:param name: The name of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def ports(self):
"""Gets the ports of this V1beta1TritonSpec. # noqa: E501
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:return: The ports of this V1beta1TritonSpec. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1beta1TritonSpec.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:param ports: The ports of this V1beta1TritonSpec. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def protocol_version(self):
"""Gets the protocol_version of this V1beta1TritonSpec. # noqa: E501
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:return: The protocol_version of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""Sets the protocol_version of this V1beta1TritonSpec.
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:param protocol_version: The protocol_version of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._protocol_version = protocol_version
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1beta1TritonSpec. # noqa: E501
:return: The readiness_probe of this V1beta1TritonSpec. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1beta1TritonSpec.
:param readiness_probe: The readiness_probe of this V1beta1TritonSpec. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resources(self):
"""Gets the resources of this V1beta1TritonSpec. # noqa: E501
:return: The resources of this V1beta1TritonSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1TritonSpec.
:param resources: The resources of this V1beta1TritonSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def runtime_version(self):
"""Gets the runtime_version of this V1beta1TritonSpec. # noqa: E501
Runtime version of the predictor docker image # noqa: E501
:return: The runtime_version of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._runtime_version
@runtime_version.setter
def runtime_version(self, runtime_version):
"""Sets the runtime_version of this V1beta1TritonSpec.
Runtime version of the predictor docker image # noqa: E501
:param runtime_version: The runtime_version of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._runtime_version = runtime_version
@property
def security_context(self):
"""Gets the security_context of this V1beta1TritonSpec. # noqa: E501
:return: The security_context of this V1beta1TritonSpec. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1beta1TritonSpec.
:param security_context: The security_context of this V1beta1TritonSpec. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1beta1TritonSpec. # noqa: E501
:return: The startup_probe of this V1beta1TritonSpec. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1beta1TritonSpec.
:param startup_probe: The startup_probe of this V1beta1TritonSpec. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1beta1TritonSpec. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1beta1TritonSpec. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1beta1TritonSpec.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1beta1TritonSpec. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1beta1TritonSpec. # noqa: E501
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false # noqa: E501
:return: The stdin_once of this V1beta1TritonSpec. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1beta1TritonSpec.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false # noqa: E501
:param stdin_once: The stdin_once of this V1beta1TritonSpec. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def storage_uri(self):
"""Gets the storage_uri of this V1beta1TritonSpec. # noqa: E501
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:return: The storage_uri of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1beta1TritonSpec.
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:param storage_uri: The storage_uri of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._storage_uri = storage_uri
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1beta1TritonSpec. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1beta1TritonSpec.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1beta1TritonSpec. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:return: The termination_message_policy of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1beta1TritonSpec.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1beta1TritonSpec. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1beta1TritonSpec. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1beta1TritonSpec.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1beta1TritonSpec. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1beta1TritonSpec. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1beta1TritonSpec. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1beta1TritonSpec.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1beta1TritonSpec. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1beta1TritonSpec. # noqa: E501
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1beta1TritonSpec. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1beta1TritonSpec.
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1beta1TritonSpec. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1beta1TritonSpec. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1beta1TritonSpec. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1beta1TritonSpec.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1beta1TritonSpec. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1TritonSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1TritonSpec):
return True
return self.to_dict() != other.to_dict()
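# Usage sketch (illustrative, not generated code): construct a spec and
# round-trip it through to_dict(). The storage URI and runtime version below
# are hypothetical values, not defaults of the KFServing SDK.
def _triton_spec_demo():
    spec = V1beta1TritonSpec(
        storage_uri="gs://my-bucket/model",  # hypothetical model location
        runtime_version="20.08-py3",         # hypothetical Triton image tag
        protocol_version="v2",
    )
    as_dict = spec.to_dict()
    assert as_dict["storage_uri"] == "gs://my-bucket/model"
    return as_dict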
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Artman conductor to claim and execute remote tasks.'''
from __future__ import absolute_import
import base64
import io
import logging
import os
import subprocess
import sys
import time
import traceback
import uuid
from oauth2client.client import GoogleCredentials
from gcloud import logging as cloud_logging
from googleapiclient.discovery import build_from_document
from artman.cli import main
from artman.utils.logger import logger, output_logger
MAX_ATTEMPTS = 3
CLOUD_LOGGING_CLIENT = None
def run(queue_name):
task_client = _create_tasks_client()
while True:
_pull_and_execute_tasks(task_client, queue_name)
def _pull_and_execute_tasks(task_client, queue_name):
pull_task_response = _pull_task(task_client, queue_name)
tasks = pull_task_response.get('tasks', [])
if not tasks:
# Sleep for 10 seconds if no tasks are returned.
logger.debug('There is no pending task. Sleep for 10 seconds.')
time.sleep(10)
for task in tasks:
task_id, tmp_root, artman_user_config, log_file_path = _prepare_dir()
log_file_handler = None
try:
log_file_handler = _setup_logger(log_file_path)
logger.info('Starting to execute task %s' % task)
if int(task['taskStatus']['attemptDispatchCount']) > MAX_ATTEMPTS:
logger.info('Delete task which exceeds max attempts.')
_delete_task(task_client, task)
continue
_execute_task(artman_user_config, task)
_ack_task(task_client, task)
logger.info('Task execution finished')
except Exception as e:
logger.error('\n'.join(traceback.format_tb(sys.exc_info()[2])))
_cancel_task_lease(task_client, task)
finally:
logger.info('Cleanup tmp directory %s' % tmp_root)
# Use task id as log name
_write_to_cloud_logging(task_id, log_file_path)
_cleanup(tmp_root, log_file_handler)
def _create_tasks_client():
credentials = GoogleCredentials.get_application_default()
with open(
os.path.join(os.path.dirname(__file__), 'cloudtasks.json'), 'r') as f:
return build_from_document(f.read(), credentials=credentials)
def _pull_task(task_client, queue_name):
body = {
"maxTasks": 1,
"leaseDuration": {"seconds": 300, "nanos": 0}, # Expire after 300 secs.
"responseView": "FULL",
"name": "%s" % queue_name
}
tasks = task_client.projects().locations().queues().tasks().pull(
name=queue_name, body=body).execute()
logger.info('Pulling tasks request returned %s' % tasks)
return tasks
def _ack_task(task_client, task):
body = {'scheduleTime': task['scheduleTime']}
response = task_client.projects().locations().queues().tasks().acknowledge(
name=task['name'],
body=body).execute()
logger.info('Acknowledge task request returned %s' % response)
return response
def _cancel_task_lease(task_client, task):
body = {'scheduleTime': task['scheduleTime'], 'responseView': 'FULL'}
response = task_client.projects().locations().queues().tasks().cancelLease(
name=task['name'],
body=body).execute()
logger.info('Cancel task request returned %s' % response)
return response
def _delete_task(task_client, task):
response = task_client.projects().locations().queues().tasks().delete(
name=task['name']).execute()
logger.info('Delete task request returned %s' % response)
return response
def _setup_logger(log_path):
"""Setup logger with one-time logging FileHandler."""
log_file_handler = logging.FileHandler(log_path)
logger.addHandler(log_file_handler)
return log_file_handler
def _write_to_cloud_logging(log_id, log_file_path):
"""Write log file content to cloud logging"""
# TODO(ethanbao): Turn conductor into a python object so that the logging
# client can be instance variable not global variable.
global CLOUD_LOGGING_CLIENT
if not CLOUD_LOGGING_CLIENT:
CLOUD_LOGGING_CLIENT = cloud_logging.Client()
cloud_logger = CLOUD_LOGGING_CLIENT.logger(log_id)
if log_file_path:
with open(log_file_path, 'r') as log_file:
cloud_logger.log_text(log_file.read())
def _execute_task(artman_user_config, task):
"""Execute the remote artman tasks.
It executes the artman command with a customized artman user config and
additional pipeline arguments."""
task_payload = base64.b64decode(task['pullTaskTarget']['payload'])
artman_args = task_payload.decode("utf-8").split(' ')
artman_args.append('--user-config')
artman_args.append(artman_user_config)
main.main(*artman_args)
def _prepare_dir(source_repo="https://github.com/googleapis/googleapis.git"):
"""Prepare the temporary folder to task execution.
It downloads the googleapis repo and adds a one-time artman config yaml.
TODO(ethanbao): support loading more input files from heterogeneous data
sources"""
task_id = str(uuid.uuid4())[0:8]
repo_root = '/tmp/artman/%s' % task_id
logger.info('Prepare a temporary root repo: %s' % repo_root)
try:
os.makedirs(repo_root)
except OSError as e:
raise e
logger.info('Checking out fresh clone of %s.' % source_repo)
googleapis_dir = os.path.join(repo_root, "googleapis")
subprocess.check_output(['rm', '-f', '.git/config'])
git_clone_args = ['git', 'clone', source_repo, googleapis_dir]
output = subprocess.check_output(git_clone_args)
if output:
output = output.decode('utf8')
output_logger.success(output)
artman_user_config = os.path.join(repo_root, 'artman-config.yaml')
with io.open(artman_user_config, 'w+') as file_:
file_.write(u'---\n')
file_.write(u'local_paths:\n')
file_.write(u' reporoot: %s\n' % repo_root)
if os.environ.get('TOOLKIT_HOME'):
toolkit_home = os.environ.get('TOOLKIT_HOME')
file_.write(u' toolkit: %s \n' % toolkit_home)
file_.write(u'publish: noop \n')
log_path = os.path.join(repo_root, 'artman.log')
with io.open(log_path, 'w+') as file_:
file_.write(u'-------- Beginning of %s -----------\n' % task_id)
return task_id, repo_root, artman_user_config, log_path
def _cleanup(tmp_dir, log_file_handler):
# Close the one-time logging FileHandler
if log_file_handler:
log_file_handler.close()
logger.removeHandler(log_file_handler)
# Remove tmp directory.
subprocess.check_call(['rm', '-rf', tmp_dir])
# Change working directory to the root tmp directory, as the current one
# has been removed.
os.chdir('/tmp')
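# Minimal sketch (illustrative only) of the per-task logging pattern used by
# _pull_and_execute_tasks above: attach a one-time FileHandler, run the work,
# then detach it so log lines from one task never leak into the next task's
# log file. The helper name and the ``work`` callable are ours.
def _per_task_logging_demo(work, log_path):
    handler = _setup_logger(log_path)
    try:
        work()
    finally:
        handler.close()
        logger.removeHandler(handler)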
|
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import httplib
import stubout
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
from nova.scheduler.filters.trusted_filter import AttestationService
from nova import servicegroup
from nova import test
from nova.tests.scheduler import fakes
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.config')
DATA = ''
def stub_out_https_backend(stubs):
"""
Stubs out AttestationService._do_request to return faked-out data
instead of making a real HTTPS request to the attestation service.
The stubbed call returns httplib.OK together with a fake response
whose read() returns the module-level DATA string.
:param stubs: Set of stubout stubs
"""
class FakeHTTPResponse(object):
def read(self):
return DATA
def fake_do_request(self, *args, **kwargs):
return httplib.OK, FakeHTTPResponse()
stubs.Set(AttestationService, '_do_request', fake_do_request)
class TestFilter(filters.BaseHostFilter):
pass
class TestBogusFilter(object):
"""Class that doesn't inherit from BaseHostFilter"""
pass
class ExtraSpecsOpsTestCase(test.TestCase):
def _do_extra_specs_ops_test(self, value, req, matches):
assertion = self.assertTrue if matches else self.assertFalse
assertion(extra_specs_ops.match(value, req))
def test_extra_specs_matches_simple(self):
self._do_extra_specs_ops_test(
value='1',
req='1',
matches=True)
def test_extra_specs_fails_simple(self):
self._do_extra_specs_ops_test(
value='',
req='1',
matches=False)
def test_extra_specs_fails_simple2(self):
self._do_extra_specs_ops_test(
value='3',
req='1',
matches=False)
def test_extra_specs_fails_simple3(self):
self._do_extra_specs_ops_test(
value='222',
req='2',
matches=False)
def test_extra_specs_fails_with_bogus_ops(self):
self._do_extra_specs_ops_test(
value='4',
req='> 2',
matches=False)
def test_extra_specs_matches_with_op_eq(self):
self._do_extra_specs_ops_test(
value='123',
req='= 123',
matches=True)
def test_extra_specs_matches_with_op_eq2(self):
self._do_extra_specs_ops_test(
value='124',
req='= 123',
matches=True)
def test_extra_specs_fails_with_op_eq(self):
self._do_extra_specs_ops_test(
value='34',
req='= 234',
matches=False)
def test_extra_specs_fails_with_op_eq3(self):
self._do_extra_specs_ops_test(
value='34',
req='=',
matches=False)
def test_extra_specs_matches_with_op_seq(self):
self._do_extra_specs_ops_test(
value='123',
req='s== 123',
matches=True)
def test_extra_specs_fails_with_op_seq(self):
self._do_extra_specs_ops_test(
value='1234',
req='s== 123',
matches=False)
def test_extra_specs_matches_with_op_sneq(self):
self._do_extra_specs_ops_test(
value='1234',
req='s!= 123',
matches=True)
def test_extra_specs_fails_with_op_sneq(self):
self._do_extra_specs_ops_test(
value='123',
req='s!= 123',
matches=False)
def test_extra_specs_fails_with_op_sge(self):
self._do_extra_specs_ops_test(
value='1000',
req='s>= 234',
matches=False)
def test_extra_specs_fails_with_op_sle(self):
self._do_extra_specs_ops_test(
value='1234',
req='s<= 1000',
matches=False)
def test_extra_specs_fails_with_op_sl(self):
self._do_extra_specs_ops_test(
value='2',
req='s< 12',
matches=False)
def test_extra_specs_fails_with_op_sg(self):
self._do_extra_specs_ops_test(
value='12',
req='s> 2',
matches=False)
def test_extra_specs_matches_with_op_in(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 11',
matches=True)
def test_extra_specs_matches_with_op_in2(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 12311321',
matches=True)
def test_extra_specs_matches_with_op_in3(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 12311321 <in>',
matches=True)
def test_extra_specs_fails_with_op_in(self):
self._do_extra_specs_ops_test(
value='12310321',
req='<in> 11',
matches=False)
def test_extra_specs_fails_with_op_in2(self):
self._do_extra_specs_ops_test(
value='12310321',
req='<in> 11 <in>',
matches=False)
def test_extra_specs_matches_with_op_or(self):
self._do_extra_specs_ops_test(
value='12',
req='<or> 11 <or> 12',
matches=True)
def test_extra_specs_matches_with_op_or2(self):
self._do_extra_specs_ops_test(
value='12',
req='<or> 11 <or> 12 <or>',
matches=True)
def test_extra_specs_fails_with_op_or(self):
self._do_extra_specs_ops_test(
value='13',
req='<or> 11 <or> 12',
matches=False)
def test_extra_specs_fails_with_op_or2(self):
self._do_extra_specs_ops_test(
value='13',
req='<or> 11 <or> 12 <or>',
matches=False)
def test_extra_specs_matches_with_op_le(self):
self._do_extra_specs_ops_test(
value='2',
req='<= 10',
matches=True)
def test_extra_specs_fails_with_op_le(self):
self._do_extra_specs_ops_test(
value='3',
req='<= 2',
matches=False)
def test_extra_specs_matches_with_op_ge(self):
self._do_extra_specs_ops_test(
value='3',
req='>= 1',
matches=True)
def test_extra_specs_fails_with_op_ge(self):
self._do_extra_specs_ops_test(
value='2',
req='>= 3',
matches=False)
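# Reading aid (illustrative, not part of the original tests): in these tests
# the numeric '=' operator of extra_specs_ops behaves like a ">=" comparison,
# which is why test_extra_specs_matches_with_op_eq2 above expects value='124'
# to match req='= 123' while value='34' fails against req='= 234'.
def _eq_op_reading_aid():
    assert extra_specs_ops.match('124', '= 123')
    assert not extra_specs_ops.match('34', '= 234')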
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
stub_out_https_backend(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
filter_handler = filters.HostFilterHandler()
classes = filter_handler.get_matching_classes(
['nova.scheduler.filters.all_filters'])
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
def test_standard_filters_is_deprecated(self):
info = {'called': False}
def _fake_deprecated(*args, **kwargs):
info['called'] = True
self.stubs.Set(filters.LOG, 'deprecated', _fake_deprecated)
filter_handler = filters.HostFilterHandler()
filter_handler.get_matching_classes(
['nova.scheduler.filters.standard_filters'])
self.assertTrue(info['called'])
self.assertIn('AllHostsFilter', self.class_map)
self.assertIn('ComputeFilter', self.class_map)
def test_all_filters(self):
# Double check at least a couple of known filters exist
self.assertIn('AllHostsFilter', self.class_map)
self.assertIn('ComputeFilter', self.class_map)
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(self, service):
return ret_value
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': instance_uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': instance_uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/24',
'build_near_host_ip': affinity_ip}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/32',
'build_near_host_ip': affinity_ip}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_type_filter(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TypeAffinityFilter']()
filter_properties = {'context': self.context,
'instance_type': {'id': 1}}
filter2_properties = {'context': self.context,
'instance_type': {'id': 2}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since empty
self.assertTrue(filt_cls.host_passes(host, filter_properties))
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 1})
#True since same type
self.assertTrue(filt_cls.host_passes(host, filter_properties))
#False since different type
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
#False since node not homogeneous
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 2})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_type_filter(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateTypeAffinityFilter']()
filter_properties = {'context': self.context,
'instance_type': {'name': 'fake1'}}
filter2_properties = {'context': self.context,
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # True since type matches aggregate metadata
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['fake_host'], metadata={'instance_type': 'fake1'})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since type does not match aggregate metadata
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
def test_ram_filter_fails_on_memory(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_oversubscribe(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=2.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
def test_disk_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {'instance_type': {'root_gb': 1,
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {'instance_type': {'root_gb': 11,
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_oversubscribe(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=10.0)
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 19}}
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(12 * 10.0, host.limits['disk_gb'])
def test_disk_filter_oversubscribe_fail(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=10.0)
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 20}}
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_disabled(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_down(self):
self._stub_service_is_up(False)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_capability_disabled(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_same_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_different_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'arm',
'hypervisor_type': 'qemu',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_partial_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_partial_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_without_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
filter_properties = {'request_spec': {}}
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_without_host_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeCapabilitiesFilter']()
capabilities = {'enabled': True}
capabilities.update(ecaps)
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': '1', 'opt2': '2'},
especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_fails_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': '1', 'opt2': '2'},
especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
passes=False)
def test_compute_filter_pass_extra_specs_simple_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': '1', 'opt2': '2'},
especs={'capabilities:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': '1', 'opt2': '2'},
especs={'wrong_scope:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': {'a': '1', 'b': {'aa': '2'}}, 'opt2': '2'},
especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
'trust:trusted_host': 'true'},
passes=True)
def test_aggregate_filter_passes_no_extra_specs(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def _create_aggregate_with_host(self, name='fake_aggregate',
metadata=None,
hosts=['host1']):
values = {'name': name,
'availability_zone': 'fake_avail_zone', }
result = db.aggregate_create(self.context.elevated(), values, metadata)
for host in hosts:
db.aggregate_host_add(self.context.elevated(), result['id'], host)
return result
def _do_test_aggregate_filter_extra_specs(self, emeta, especs, passes):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_fails_extra_specs_deleted_host(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
extra_specs = {'opt1': 's== 1', 'opt2': 's== 2',
'trust:trusted_host': 'true'}
self._create_aggregate_with_host(metadata={'opt1': '1'})
agg2 = self._create_aggregate_with_host(name='fake2',
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2['id'], 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_passes_extra_specs_simple(self):
self._do_test_aggregate_filter_extra_specs(
emeta={'opt1': '1', 'opt2': '2'},
especs={'opt1': '1', 'opt2': '2',
'trust:trusted_host': 'true'},
passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self):
self._do_test_aggregate_filter_extra_specs(
emeta={'opt1': '1', 'opt2': '2'},
especs={'opt1': '1', 'opt2': '222',
'trust:trusted_host': 'true'},
passes=False)
def test_isolated_hosts_fails_isolated_on_non_isolated(self):
self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
filt_cls = self.class_map['IsolatedHostsFilter']()
filter_properties = {
'request_spec': {
'instance_properties': {'image_ref': 'isolated'}
}
}
host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
filt_cls = self.class_map['IsolatedHostsFilter']()
filter_properties = {
'request_spec': {
'instance_properties': {'image_ref': 'non-isolated'}
}
}
host = fakes.FakeHostState('isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
filt_cls = self.class_map['IsolatedHostsFilter']()
filter_properties = {
'request_spec': {
'instance_properties': {'image_ref': 'isolated'}
}
}
host = fakes.FakeHostState('isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
filt_cls = self.class_map['IsolatedHostsFilter']()
filter_properties = {
'request_spec': {
'instance_properties': {'image_ref': 'non-isolated'}
}
}
host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes_with_no_query(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_memory(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_disk(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_caps_disabled(self):
filt_cls = self.class_map['JsonFilter']()
json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024],
'$capabilities.enabled'])
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_service_disabled(self):
filt_cls = self.class_map['JsonFilter']()
json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024],
['not', '$service.disabled']])
filter_properties = {'instance_type': {'memory_mb': 1024,
'local_gb': 200},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
"""Test json filter more thoroughly"""
filt_cls = self.class_map['JsonFilter']()
raw = ['and',
'$capabilities.enabled',
['=', '$capabilities.opt1', 'match'],
['or',
['and',
['<', '$free_ram_mb', 30],
['<', '$free_disk_mb', 300]],
['and',
['>', '$free_ram_mb', 30],
['>', '$free_disk_mb', 300]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
['=', [1, 1], True],
['=', [1, 2], False],
['<', [1, 2], True],
['<', [1, 1], False],
['<', [2, 1], False],
['>', [2, 1], True],
['>', [2, 2], False],
['>', [2, 3], False],
['<=', [1, 2], True],
['<=', [1, 1], True],
['<=', [2, 1], False],
['>=', [2, 1], True],
['>=', [2, 2], True],
['>=', [2, 3], False],
['in', [1, 1], True],
['in', [1, 1, 2, 3], True],
['in', [4, 1, 2, 3], False],
['not', [True], False],
['not', [False], True],
['or', [True, False], True],
['or', [False, False], False],
['and', [True, True], True],
['and', [False, False], False],
['and', [True, False], False],
# Nested ((True or False) and (2 > 1)) == Passes
['and', [['or', True, False], ['>', 2, 1]], True]]
for (op, args, expected) in ops_to_test:
raw = [op] + args
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertEqual(expected,
filt_cls.host_passes(host, filter_properties))
# This results in [False, True, False, True] and if any are True
# then it passes...
raw = ['not', True, False, True, False]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# This results in [False, False, False] and if any are True
# then it passes...which this doesn't
raw = ['not', True, True, True]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
filt_cls = self.class_map['JsonFilter']()
raw = ['!=', 1, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = []
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
raw = {}
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
raw = ['>', 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
raw = ['=', '$foo', 2, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_default_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
global DATA
DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
global DATA
DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
global DATA
DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
global DATA
DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@staticmethod
def _make_zone_request(zone, is_admin=False):
ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
return {
'context': ctxt,
'request_spec': {
'instance_properties': {
'availability_zone': zone
}
}
}
def test_availability_zone_filter_same(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1',
{'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
host = fakes.FakeHostState('host1', 'node1',
{'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
"""Test case where retry/re-scheduling is disabled"""
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
"""Node not previously tried"""
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
hosts=[('host1', 'node1'), # same host, different node
('host2', 'node2'), # different host and node
])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
"""Node was already tried"""
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
hosts=[('host1', 'node1')])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
host = fakes.FakeHostState('host1', 'node1',
                                   {'num_io_ops': 8})
        filter_properties = {}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
|
|
""" Galois Field GF(2^m) and polynomials over it. """
#-------------------------------------------------------------------------------
from math import log
import numpy as np
from itertools import zip_longest
from io import StringIO
import locale
#-------------------------------------------------------------------------------
class GF:
"GF(2^m): Galois Field of order 2^m."
# Default primitive polynomials for generating GF(2^m).
default_prim_poly = dict()
#A9876543210
default_prim_poly[2] = 0b00000000111 # X^2 + X + 1
default_prim_poly[3] = 0b00000001011 # X^3 + X + 1
default_prim_poly[4] = 0b00000010011 # X^4 + X + 1
default_prim_poly[5] = 0b00000100101 # X^5 + X^2 + 1
default_prim_poly[6] = 0b00001000011 # X^6 + X + 1
default_prim_poly[7] = 0b00010001001 # X^7 + X^3 + 1
default_prim_poly[8] = 0b00100011101 # X^8 + X^4 + X^3 + X^2 + 1
default_prim_poly[9] = 0b01000010001 # X^9 + X^4 + 1
default_prim_poly[10] = 0b10000001001 # X^10 + X^3 + 1
def __init__(self, order, prim_poly=None):
"""
Create the Galois Field GF(2^m).
Parameters
----------
order : int
Order of the field. Must be a power of 2.
prim_poly : int, optional
Primitive polynomial for generating the field.
Default primitive polynomials for generating GF(2^m).
m | Default prim_poly
---+--------------------------------------------
2 | 0b00000000111 # X^2 + X + 1
3 | 0b00000001011 # X^3 + X + 1
4 | 0b00000010011 # X^4 + X + 1
5 | 0b00000100101 # X^5 + X^2 + 1
6 | 0b00001000011 # X^6 + X + 1
7 | 0b00010001001 # X^7 + X^3 + 1
8 | 0b00100011101 # X^8 + X^4 + X^3 + X^2 + 1
9 | 0b01000010001 # X^9 + X^4 + 1
10 | 0b10000001001 # X^10 + X^3 + 1
"""
m = int(log(order, 2))
if 2 ** m != order:
raise ValueError(
'order should be a power of 2. Given: {}.'.format(order))
self.order = order
self.m = m
if prim_poly is None:
try:
prim_poly = self.default_prim_poly[m]
except KeyError:
raise ValueError('No default prim_poly for m = {}.'.format(m))
if 3 < prim_poly < 2 * self.order:
self.prim_poly = prim_poly
else:
            raise ValueError('prim_poly should be > 3 and < 2^(m+1)')
self.exptable = np.empty(self.order, dtype='int') # i -> e = alpha^i
self.logtable = np.empty(self.order, dtype='int') # e -> i, such that alpha^i = e
alpha_pow_i = 1
for i in range(self.order):
self.exptable[i] = alpha_pow_i
self.logtable[alpha_pow_i] = i
alpha_pow_i <<= 1 # equivalent to multiplication by alpha=2
if alpha_pow_i & 2**m:
alpha_pow_i ^= self.prim_poly
# TODO set logtable[0] to min_int or -1 or some 'safe' value like
# -minint/2? Or maybe, -2**m?
self.logtable[0] = np.iinfo(int).min
self.logtable[1] = 0
elements = [GFElement(value, self) for value in range(self.order)]
self.elements = np.array(elements)
def __repr__(self):
return 'GF(2^{.m})'.format(self)
def __call__(self, *coeffs, **kwargs):
"""
        Return a new polynomial over the field with the given coefficients.
        The coefficients are given as individual arguments.
"""
return GFPolynomial(coeffs, self, kwargs.get('degbound'))
def poly(self, coeffs, degbound=None):
"Return a new polynomial over the field with the given coeffecients and degree bound."
return GFPolynomial(coeffs, self, degbound)
def zero(self, degbound=1):
"Return a new zero polynomial with the given degree bound."
return GFPolynomial.zero(self, degbound)
def random(self, degbound=1):
"Return a new polynomial with random coeffecients and the given degree bound."
return GFPolynomial.random(self, degbound)
def elem(self, value):
#return GFElement(value, self)
return self.elements[value]
def add(self, x1, x2):
"Add (xor) arguments element-wise (similar to numpy.add)."
return np.bitwise_xor(x1, x2)
def sum(self, a):
"Sum (xor) array elements (similar to numpy.sum)."
return np.bitwise_xor.reduce(a)
def multiply(self, x1, x2):
"Multiply arguments element-wise (similar to numpy.multiply)."
nonzeros = np.logical_and(x1, x2)
if not np.any(nonzeros):
return nonzeros.astype('int')
logx1 = self.logtable[x1]
logx2 = self.logtable[x2]
#logy = logx1 + logx2
logy = logx1
logy += logx2
logy %= self.order - 1
y = self.exptable[logy]
y *= nonzeros # NOTE: This relies on True = 1 and False = 0 !!!
return y
def multiply_ss(self, s1, s2):
"Multiply two scalar arguments s1 and s2."
if s1 == 0 or s2 == 0:
return 0
x1 = self.logtable[s1]
x2 = self.logtable[s2]
y = (x1 + x2) % (self.order - 1)
return self.exptable[y]
def multiply_as(self, a, s):
"Multiply array argument a to scalar argument s."
zeros_a = a == 0
if s == 0 or all(zeros_a):
return np.zeros_like(a, dtype='int')
xa = self.logtable[a]
xs = self.logtable[s]
ya = xa + xs
ya %= self.order - 1
mul_a = self.exptable[ya]
mul_a[zeros_a] = 0
return mul_a
def pow(self, a, b):
if a == 0:
return 0
if b == 0:
return 1
x = self.logtable[a]
z = (x * b) % (self.order - 1)
return self.exptable[z]
def inverse(self, a):
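        # The inverse of alpha^x is alpha^(2^m - 1 - x), since
        # alpha^(2^m - 1) = 1 in GF(2^m).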
if a == 0:
raise ZeroDivisionError('division by zero')
x = self.logtable[a]
z = self.order - 1 - x
return self.exptable[z]
def div(self, a, b):
return self.multiply_ss(a, self.inverse(b))
class GFPolynomial:
"Polynomials over Galois Fields GF(2^m)"
def __init__(self, coeffs, field, degbound=None):
"""
        Create a polynomial with coefficients over the given field.
p(x) = c_0 + c_1 x + c_2 x^2 + ...
Parameters
----------
coeffs : array_like
Coefficients of the polynomial using the straightforward index
            notation, i.e., coeffs[i] = c_i = coefficient corresponding to x^i.
field : instance of Galois Field GF(2^m)
degbound : int, optional
Degree bound for the polynomial. An array of this is created
internally to hold the polynomial coefficients, with the higher
terms set to 0. By default, the parameter 'coeffs' is directly
used.
"""
self.field = field
if degbound is None:
if len(coeffs) == 0:
self.coeffs = np.zeros(1, dtype='int')
else:
self.coeffs = np.asarray(coeffs, dtype='int')
elif degbound > 0:
self.coeffs = np.zeros(degbound, dtype='int')
ncoeffs = min(len(coeffs), degbound)
self.coeffs[:ncoeffs] = coeffs[:ncoeffs]
else:
raise ValueError('degree bound should be > 0.')
if np.any(self.coeffs < 0) or np.any(self.coeffs >= field.order):
raise ValueError('Elements of {} must be in range [0, {}].'.format(
field, field.order - 1))
self._update_deg()
@classmethod
def zero(cls, field, degbound=1):
"Return a new zero polynomial with the given degree bound."
if degbound <= 0:
raise ValueError('degree bound should be > 0.')
poly = cls.__new__(cls)
poly.field = field
poly.coeffs = np.zeros(degbound, dtype='int')
poly.deg = -1
return poly
@classmethod
def random(cls, field, degbound=1):
"Return a new polynomial with random coeffecients and the given degree bound."
coeffs = np.random.randint(field.order, size=degbound)
return cls(coeffs, field)
def _update_deg(self):
for i in reversed(range(len(self.coeffs))):
if self.coeffs[i] != 0:
self.deg = i
return
self.deg = -1
def pretty_print(self, x='x'):
"""Return the polynomial as a pretty printed string, formatted as sums
        of nonzero coefficients multiplied by appropriate powers of x. The
indeterminate x can be changed to something else via the argument x.
"""
field = self.field
        encoding = locale.getdefaultlocale()[1]
        if encoding and encoding.lower() == 'utf-8':
alpha = '\N{GREEK SMALL LETTER ALPHA}'
else:
alpha = 'a'
def xi(i):
if i == 0: return ''
if i == 1: return x
return '{}^{}'.format(x, i)
def coeff(i, c):
if i == 0 and c == 1:
return '1'
if c == 1:
return ''
p = field.logtable[c]
if p == 1:
return '{}'.format(alpha)
return '{}^{}'.format(alpha, p)
def term(i, c):
if i == 0:
return coeff(i, c)
elif c == 1 or c == '':
return xi(i)
else:
return coeff(i, c) + ' ' + xi(i)
if self == 0:
return '0'
else:
return ' + '.join(term(i, c) for i, c in enumerate(self) if c != 0)
@property
def p(self):
"User convenience property for pretty printing."
return self.pretty_print()
def __repr__(self):
return 'GFPolynomial({}, {})'.format(np.array_repr(self.coeffs),
self.field)
def __str__(self):
return self.pretty_print() + ' ' + str(self.field)
def copy(self):
"Return a copy of the polynomial."
return GFPolynomial(self.coeffs.copy(), self.field)
def __len__(self):
"Return the degree bound of the polynomial."
return len(self.coeffs)
def __getitem__(self, key):
"Return coeffecients indexed/sliced by key, i.e., coeffs[key]."
#TODO: is it better to return a GFPolynomial instance?
return self.coeffs[key]
#try:
# return self.coeffs[key]
#except IndexError:
# return 0
def __eq__(self, other):
"Return True if polynomials have same degree and same coeffecients."
try:
return np.all(self.coeffs[:self.deg+1] == other.coeffs[:other.deg+1])
except AttributeError:
pass
return np.all(self.coeffs[:self.deg+1] == other)
def __add__(self, other):
"Return the sum of the two polynomials."
if isinstance(other, (int, np.integer)):
sum_coeffs = self.coeffs.copy()
sum_coeffs[0] ^= other
return GFPolynomial(sum_coeffs, self.field)
try:
if self.deg >= other.deg:
sum_coeffs = self.coeffs.copy()
sum_coeffs[:other.deg+1] ^= other.coeffs[:other.deg+1]
else:
sum_coeffs = other.coeffs.copy()
sum_coeffs[:self.deg+1] ^= self.coeffs[:self.deg+1]
        except AttributeError:
return NotImplemented
return GFPolynomial(sum_coeffs, self.field)
__radd__ = __add__
def __iadd__(self, other):
"In-place add the RHS polynomial to the LHS polynomial."
if isinstance(other, (int, np.integer)):
other = GFPolynomial([other], self.field)
if len(self) > other.deg:
self.coeffs[:other.deg+1] ^= other.coeffs[:other.deg+1]
else:
raise ValueError(
'length of LHS ({}) is too small for in-place addition. '
'Must be > {}.'.format(len(self), other.deg))
self._update_deg()
return self
__sub__ = __add__
__rsub__ = __add__
__isub__ = __iadd__
def __pos__(self):
"Return +self"
return self
def __neg__(self):
"Return -self"
return self
def __mul__(self, other):
"Return the product of the two polynomials."
if self == 0 or other == 0:
return GFPolynomial([0], self.field)
if isinstance(other, (int, np.integer)):
#mul_coeffs = self.field.multiply(self.coeffs, other)
mul_coeffs = self.field.multiply_as(self.coeffs, other)
return GFPolynomial(mul_coeffs, self.field)
# Speed optimization
if self.deg > other.deg:
p, q = self, other
else:
p, q = other, self
pq_coeffs = np.zeros(p.deg + q.deg + 1, dtype='int')
for i, qi in enumerate(q.coeffs[:q.deg+1]):
pq_coeffs[i:i+p.deg+1] ^= self.field.multiply_as(p.coeffs[:p.deg+1], qi)
return GFPolynomial(pq_coeffs, self.field)
def _mulpy(self, other):
if isinstance(other, (int, np.integer)):
other = GFPolynomial([other], self.field)
coeffs = [0] * (self.deg + other.deg + 1)
for i, pi in enumerate(self.coeffs[:self.deg+1]):
for j, qj in enumerate(other.coeffs[:other.deg+1]):
                coeffs[i + j] ^= self.field.multiply_ss(pi, qj)
return self.field.poly(coeffs)
__rmul__ = __mul__
def __pow__(self, exponent):
"Return the polynomial raised to the exponent."
if not isinstance(exponent, (int, np.integer)):
raise TypeError('unsupported exponent type for ** or pow(): {}.'
' Must be int.'.format(type(exponent)))
if exponent == 0:
return GFPolynomial([1], self.field)
if self.deg == 0:
coeffs = [self.field.pow(self.coeffs[0], exponent)]
return GFPolynomial(coeffs, self.field)
if exponent < 0:
raise ValueError(
                'non-constant polynomial cannot be raised to a negative exponent.')
return (self ** (exponent - 1)) * self
def __divmod__(self, other):
"Return the quotient and remainder when 1st polynomial is divided by 2nd."
if other == 0:
raise ZeroDivisionError('division by zero')
if self == 0:
return GFPolynomial([0], self.field), GFPolynomial([0], self.field)
if isinstance(other, (int, np.integer)):
other = GFPolynomial([other], self.field)
num = self.coeffs[:self.deg+1] # Numerator
den = other.coeffs[:other.deg+1] # Denominator
num_deg = self.deg
den_deg = other.deg
dlc = den[den_deg] # Denominator leading coeff
coeffs = num.copy() # Result goes here
for i in range(num_deg, den_deg - 1, -1):
qcoeff = self.field.div(coeffs[i], dlc)
if qcoeff != 0:
#coeffs[i - den_deg:i] ^= self.field.multiply(den[:den_deg], qcoeff)
coeffs[i - den_deg:i] ^= self.field.multiply_as(den[:den_deg], qcoeff)
coeffs[i] = qcoeff
q = coeffs[den_deg:] # Quotient
r = coeffs[:den_deg] # Remainder
return GFPolynomial(q, self.field), GFPolynomial(r, self.field)
def __floordiv__(self, other):
"Return the quotient when 1st polynomial is divided by 2nd."
return divmod(self, other)[0]
#__truediv__ = __floordiv__
def __mod__(self, other):
"Return the remainder when 1st polynomial is divided by 2nd."
return divmod(self, other)[1]
def __call__(self, x):
"""Evaluate the polynomial at x, element-wise.
"""
# Horner's method
rev_coeffs = reversed(self.coeffs)
y = 0
for coeff in rev_coeffs:
y = self.field.multiply(y, x) ^ coeff
return y
def roots(self):
"""Compute the roots of the polynomial.
NOTE: multiplicity is not handled.
"""
vals = self(range(self.field.order))
return np.where(vals == 0)[0]
def deriv(self):
"""Compute the formal derivative of the polynomial.
Given the polynomial
p(x) = p_0 + p_1 x + p_2 x^2 + p_3 x^3 + ... + p_n x^n
        its formal derivative is defined as
            p'(x) = p_1 + 2p_2 x + 3p_3 x^2 + ... + np_n x^(n-1)
Here, jp_j = p_j + p_j + ... + p_j (j times)
= 0 if j is even else p_j
= p_j * (j mod 2),
where 2 is the characteristic of the field GF(2^m).
"""
coeffs = [c * (i % 2) for i, c in enumerate(self)]
return GFPolynomial(coeffs[1:], self.field)
class GFElement:
def __init__(self, value, field):
#if value < 0 or value >= field.order:
# raise ValueError('Elements of {} must be in range [0, {}].'.format(
# field, field.order - 1))
self.value = value
self.field = field
def __add__(self, other):
value = self.value ^ other.value
#return GFElement(value, self.field)
return self.field.elements[value]
__radd__ = __add__
__sub__ = __add__
__rsub__ = __add__
def __pos__(self):
return self
def __neg__(self):
return self
def __mul__(self, other):
value = self.field.multiply_ss(self.value, other.value)
#return GFElement(value, self.field)
return self.field.elements[value]
def __pow__(self, exponent):
if not isinstance(exponent, (int, np.integer)):
raise TypeError('unsupported exponent type for ** or pow(): {}.'
' Must be int.'.format(type(exponent)))
value = self.field.pow(self.value, exponent)
#return GFElement(value, self.field)
return self.field.elements[value]
def inverse(self):
value = self.field.inverse(self.value)
#return GFElement(value, self.field)
return self.field.elements[value]
def __floordiv__(self, other):
return self * other.inverse()
__truediv__ = __floordiv__
def __repr__(self):
return 'GFElement({0.value}, {0.field})'.format(self)
class GFArray:
"Array of elements from Galois Field GF(2^m)"
def __init__(self, data, field):
"""
Create an array with elements from the given field.
Parameters
----------
data : array_like of ints
field : instance of Galois Field GF(2^m)
"""
if isinstance(data, GFArray):
self.array = data.array[:]
else:
self.array = np.asarray(data, dtype='int')
self.field = field
if np.any(self.array < 0) or np.any(self.array >= field.order):
raise ValueError('Elements of {} must be in range [0, {}].'.format(
field, field.order - 1))
@classmethod
def zeros(cls, shape, field):
"Return a new GFArray of given shape, filled with zeros."
array = np.zeros(shape, dtype='int')
return GFArray(array, field)
def __repr__(self):
return 'GFArray({}, {})'.format(
np.array2string(self.array, prefix='GFArray '), self.field)
@property
def shape(self):
return self.array.shape
@property
def ndim(self):
return self.array.ndim
def _check_operand_fields(self, other):
if self.field != other.field:
raise TypeError(
'operands have different fields: {} and {}'.format(
self.field, other.field))
def __getitem__(self, key):
return GFArray(self.array[key], self.field)
def __eq__(self, other):
return (self.field == other.field) and np.all(self.array == other.array)
def __add__(self, other):
self._check_operand_fields(other)
return GFArray(self.array ^ other.array, self.field)
__radd__ = __add__
def __iadd__(self, other):
self._check_operand_fields(other)
self.array ^= other.array
return self
__sub__ = __add__
__rsub__ = __add__
__isub__ = __iadd__
def __pos__(self):
"Return +self"
return self
def __neg__(self):
"Return -self"
return self
def __mul__(self, other):
if isinstance(other, (int, np.integer)):
_other = GFArray([other], self.field)
else:
_other = other
y = self.field.multiply(self.array, _other.array)
return GFArray(y, self.field)
__rmul__ = __mul__
#__matmul__ = dot
def sum(self):
return GFArray(self.field.sum(self.array), self.field)
def dot(self, other):
field = self.field
#other = GFArray(other, field)
if self.ndim == 0 or other.ndim == 0:
return self * other
if self.ndim == 1 and other.ndim == 1:
return (self * other).sum()
        if self.shape[self.ndim - 1] != other.shape[other.ndim - 2]:
raise ValueError('shapes {} and {} not aligned'.format(
self.shape, other.shape))
if self.ndim == 1 and other.ndim > 1:
ncols = other.shape[1]
y = np.zeros(ncols, dtype='int')
for c in range(ncols):
y[c] = field.sum(field.multiply(self.array[:], other.array[:, c]))
elif self.ndim > 1 and other.ndim == 1:
nrows = self.shape[0]
y = np.zeros(nrows, dtype='int')
for r in range(nrows):
y[r] = field.sum(field.multiply(self.array[r,:], other.array[:]))
else:
nrows = self.shape[0]
ncols = other.shape[1]
y = np.zeros((nrows, ncols), dtype='int')
for r in range(nrows):
for c in range(ncols):
y[r, c] = field.sum(field.multiply(self.array[r,:], other.array[:,c]))
return GFArray(y, field)
#class GFAry(np.ndarray):
# def __new__(cls, input_array, field=None):
# obj = np.asarray(input_array).view(cls)
# obj.field = field
# return obj
# #def __array_finalize__(self, obj):
# #if obj is None: return
# #self.field = getattr(obj, 'field', None)
# def __array_finalize__(self, obj):
# #print('In __array_finalize__:')
# #print(' self is %s' % repr(self))
# #print(' obj is %s' % repr(obj))
# if obj is None: return
# self.info = getattr(obj, 'info', None)
# def __array_wrap__(self, out_arr, context=None):
# print('In __array_wrap__:')
# print(' self is %s' % repr(self))
# print(' arr is %s' % repr(out_arr))
# # then just call the parent
# return np.ndarray.__array_wrap__(self, out_arr, context)
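#-------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it exercises the
# classes defined above -- GF, GFPolynomial (via GF.poly / GF.__call__) and
# GFArray -- purely for illustration.
if __name__ == '__main__':
    gf = GF(16)                       # GF(2^4), default primitive polynomial
    print(gf)
    # Polynomial arithmetic: p(x) = 1 + x, q(x) = 3 + x^2 over GF(2^4).
    p = gf(1, 1)
    q = gf.poly([3, 0, 1])
    print('p*q =', (p * q).p)
    quot, rem = divmod(p * q, p)
    print('quotient =', quot.p, '; remainder =', rem.p)
    print('roots of p*q:', (p * q).roots())
    # Element-wise and matrix arithmetic over the field with GFArray.
    a = GFArray([[1, 2], [3, 4]], gf)
    b = GFArray([5, 6], gf)
    print('a.dot(b) =', a.dot(b))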
|
|
#! /usr/bin/env python
#-----------------------------------------------------------------------
# COPYRIGHT_BEGIN
# Copyright (C) 2016-2017, FixFlyer, LLC.
# All rights reserved.
# COPYRIGHT_END
#-----------------------------------------------------------------------
"""Logging support."""
import os
import time
import psutil
# Emergency message level.
EMERG = 0
# Alert message level.
ALERT = 1
# Critical message level.
CRIT = 2
# Error message level.
ERR = 3
# Warning message level.
WARNING = 4
# Notice message level.
NOTICE = 5
# Informational message level.
INFO = 6
# Debugging message level.
DEBUG = 7
# Level names.
NAMES = ["EMERG", "ALERT", "CRIT", "ERR", "WARNING", "NOTICE", "INFO", "DEBUG"]
# Level numbers.
LEVELS = {"EMERG": EMERG,
"ALERT": ALERT,
"CRIT": CRIT,
"ERR": ERR,
"WARNING": WARNING,
"NOTICE": NOTICE,
"INFO": INFO,
"DEBUG": DEBUG}
def get_user_name():
user = os.environ.get('USER', os.environ.get('LOGNAME'))
if user:
return user
if os.name == "posix":
import pwd
user = pwd.getpwuid(os.geteuid()).pw_name
if user:
return user
return "unknown"
class Logger(object):
"""Logger interface.
The API library logs informational messages describing its
internal state and events. By default, these messages are logged
to a file in the host system's temporary directory.
It's often desirable to redirect this logging to whatever facility
is used by the client application. The library's logging can be
redirected by using an adaptor that implements this interface.
"""
@staticmethod
def string_to_level(level):
"""Convert a string level name to its integer value.
@param[in] level
A level name, matching the constants defined above (EMERG, etc).
@retval @c None
No level name matched @p level.
@returns
Integer level value."""
return LEVELS.get(level)
@staticmethod
def level_to_string(level):
"""Convert an integer level into its string name.
@param[in] level
Integer level value.
@retval None
The supplied @p level value is invalid.
@returns
String name for the specified level."""
if level < 0 or level >= len(NAMES):
return None
return NAMES[level]
def create_log(self, log):
"""Create a new log instance.
@param[in] log
Name of log instance.
@retval 0
Successful."""
# pylint: disable=unused-argument,no-self-use
return # pragma: no cover
def set_log_level(self, name, level):
"""Set current minimum log level.
@param[in] name
Name of a log instance.
@param[in] level
Minimum log level of messages to be emitted.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
# pylint: disable=unused-argument,no-self-use
return # pragma: no cover
def get_log_level(self, name):
"""Get the current minimum log level.
When logging a message, the caller specifies a level of
importance, in the range of zero (most important) to 7 (least
important). Messages whose level is greater than the value
returned from this function are not emitted.
@param[in] name
Name of a log instance.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
# pylint: disable=unused-argument,no-self-use
return # pragma: no cover
def log(self, log, level, message):
"""Log a message.
When implementing a Logger, providing this function is
required.
@param[in] log
Name of a log instance.
@param[in] level
Level of importance of this message: 0 is most important,
7 is least important.
@param[in] message
The string to be logged.
@retval 0
Successful."""
# pylint: disable=unused-argument,no-self-use
return # pragma: no cover
def logf(self, log, level, template, *params):
"""Log a message, constructed printf()-style.
When implementing a Logger, providing this function is
optional. The interface class provides an implementation
which formats the string, and passes it to the single-string
variant of log().
@param[in] log
Name of a log instance.
@param[in] level
Level of importance of this message: 0 is most important, 7
is least important.
@param[in] template
printf()-style template string used to create the message.
@param[in] params
Parameters to be substituted into the @p format string.
@retval 0
Successful."""
return self.log(log, level, template % params)
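# Illustrative adaptor sketch (not part of the original module): it shows how
# the Logger interface described above can redirect library messages to
# Python's standard 'logging' package. The level mapping is an assumption
# chosen for this example.
class StdlibLoggerAdaptor(Logger):
    """Example Logger implementation backed by the stdlib 'logging' module."""
    def log(self, log, level, message):
        """Forward a pre-formatted message to the stdlib logger named 'log'."""
        import logging
        # Map flyer levels (0 = most important) onto stdlib logging levels.
        level_map = {EMERG: logging.CRITICAL, ALERT: logging.CRITICAL,
                     CRIT: logging.CRITICAL, ERR: logging.ERROR,
                     WARNING: logging.WARNING, NOTICE: logging.INFO,
                     INFO: logging.INFO, DEBUG: logging.DEBUG}
        logging.getLogger(log).log(level_map.get(level, logging.DEBUG), message)
        return 0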
class FileLog(object):
"""Simple, file-based, log implementation."""
def __init__(self, name, directory):
self._name = name
self._directory = directory
self._level = DEBUG
self._file = None
return
def __del__(self):
if self._file:
self.close()
return
def set_level(self, level):
"""Set current minimum log level.
@param[in] level
Minimum log level of messages to be emitted.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
self._level = level
return
def get_level(self):
"""Get the current minimum log level.
When logging a message, the caller specifies a level of
importance, in the range of zero (most important) to 7 (least
important). Messages whose level is greater than the value
returned from this function are not emitted.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
return self._level
def open(self):
"""Open file for this log instance."""
proc = psutil.Process(os.getpid()).name()
filename = "flyer-%s-%s-%s.log" % (get_user_name(), proc, self._name)
path = os.path.join(self._directory, filename)
self._file = open(path, "a")
self.raw_write("SYSTEM",
"Opened log %s for %s (pid %u)." % (self._name,
proc,
os.getpid()))
return
def close(self):
"""Close and clean up this log instance."""
if not self._file:
return
self.raw_write("SYSTEM", "Closing log")
self._file.close()
self._file = None
return
def log(self, level, message):
"""Log a pre-formatted message."""
if level > self._level:
return
return self.raw_write(NAMES[level], message)
def raw_write(self, level, message):
"""Write a log message to a file."""
if not self._file:
return
now = time.time()
timestamp = time.strftime("%Y-%m-%d %H:%M:%S.", time.localtime(now))
us = (now - int(now)) * 1000000
timestamp += "%06u" % us
self._file.write("%s %-7s %s\n" % (timestamp, level, message))
self._file.flush()
return
class FileLogger(Logger):
"""Default logging implementation."""
def __init__(self, directory):
"""Constructor."""
super(FileLogger, self).__init__()
self._directory = directory
self._logs = {}
return
def __del__(self):
"""Destructor."""
for name in self._logs:
log = self._logs[name]
log.close()
self._logs = None
return
def create_log(self, name):
"""Create a new log instance.
@param[in] name
Name of log instance.
@returns
None Successful."""
self._logs[name] = FileLog(name, self._directory)
return
def set_log_level(self, name, level):
"""Set current minimum log level.
@param[in] name
Name of a log instance.
@param[in] level
Minimum log level of messages to be emitted.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
log = self._logs[name]
return log.set_level(level)
def get_log_level(self, name):
"""Get the current minimum log level.
When logging a message, the caller specifies a level of
importance, in the range of zero (most important) to 7 (least
important). Messages whose level is greater than the value
returned from this function are not emitted.
@param[in] name
Name of a log instance.
@retval 0
Successful.
@retval ENOENT
@p log does not exist."""
log = self._logs[name]
return log.get_level()
def log(self, name, level, message):
"""Log a message.
When implementing a Logger, providing this function is
required.
@param[in] name
Name of a log instance.
@param[in] level
Level of importance of this message: 0 is most important,
7 is least important.
@param[in] message
The string to be logged.
@retval 0
Successful."""
log = self._logs[name]
return log.log(level, message)
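#-----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It wires a
# FileLogger to the system temporary directory and emits one message.
# FileLogger.create_log() builds the FileLog but does not open its backing
# file, so the sketch reaches into the private _logs mapping to call open().
if __name__ == "__main__":
    import tempfile
    logger = FileLogger(tempfile.gettempdir())
    logger.create_log("example")
    logger._logs["example"].open()              # open the log file on disk
    logger.set_log_level("example", INFO)
    logger.logf("example", INFO, "started by %s (pid %u)",
                get_user_name(), os.getpid())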
|
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from ..autocorr import integrated_time
__all__ = ["Backend"]
class Backend(object):
"""The default backend that stores the data in memory as numpy arrays.
The backend can be subscripted to access the data.
Attributes:
acceptance: An array of ``nwalkers`` integer acceptance counts.
acceptance_fraction: An array of ``nwalkers`` acceptance fractions.
coords: An array of ``(niter, nwalkers, ndim)`` coordinates.
log_prior: An array of ``(niter, nwalkers)`` log prior evaluations.
log_likelihood: An array of ``(niter, nwalkers)`` log likelihood
evaluations.
log_probability: An array of ``(niter, nwalkers)`` log probability
evaluations.
"""
def __init__(self):
self._data = None
self.reset()
def __len__(self):
return self.niter
def reset(self):
"""Clear the chain and reset it to its default state."""
self.niter = 0
self.size = 0
self.nwalkers = None
self.dtype = None
del self._data
self._data = None
self._random_state = None
def check_dimensions(self, ensemble):
"""Check that an ensemble is consistent with the current chain.
Args:
ensemble (Ensemble): The ensemble to check.
Raises:
ValueError: If the dimension or data type of the ensemble is
inconsistent with the stored data.
"""
if self.nwalkers is None:
self.nwalkers = ensemble.nwalkers
if self.dtype is None:
self.dtype = ensemble.dtype
if self.nwalkers != ensemble.nwalkers:
raise ValueError("Dimension mismatch")
if self.dtype != ensemble.dtype:
raise ValueError("Data type mismatch")
def extend(self, n):
"""Extend the chain by a given number of steps.
Args:
n (int): The number of steps to extend the chain by.
"""
k = self.nwalkers
self.size = l = self.niter + n
if self._data is None:
self._data = np.empty((l, k), dtype=self.dtype)
self._acceptance = np.zeros(k, dtype=np.uint64)
else:
dl = l - self._data.shape[0]
if dl > 0:
self._data = np.concatenate((
self._data, np.empty((dl, k), dtype=self._data.dtype)
), axis=0)
def update(self, ensemble):
"""Append an ensemble to the chain.
Args:
ensemble (Ensemble): The ensemble to append.
"""
i = self.niter
if i >= self.size:
self.extend(i - self.size + 1)
for j, walker in enumerate(ensemble):
self._data[i, j] = walker.to_array()
self._acceptance += ensemble.acceptance
self._random_state = ensemble.random.get_state()
self.niter += 1
def __getitem__(self, name_and_index_or_slice):
if self.niter <= 0:
raise AttributeError("You need to run the chain first or store "
"the chain using the 'store' keyword "
"argument to Sampler.sample")
try:
name, index_or_slice = name_and_index_or_slice
except ValueError:
name = name_and_index_or_slice
index_or_slice = slice(None)
return self._data[name][:self.niter][index_or_slice]
def get_coords(self, **kwargs):
"""Get the stored chain of MCMC samples.
Args:
flat (Optional[bool]): Flatten the chain across the ensemble.
(default: ``False``)
thin (Optional[int]): Take only every ``thin`` steps from the
chain. (default: ``1``)
discard (Optional[int]): Discard the first ``discard`` steps in
the chain as burn-in. (default: ``0``)
Returns:
array[..., nwalkers, ndim]: The MCMC samples.
"""
return self.get_value("coords", **kwargs)
def get_log_prior(self, **kwargs):
"""Get the chain of log priors evaluated at the MCMC samples.
Args:
flat (Optional[bool]): Flatten the chain across the ensemble.
(default: ``False``)
thin (Optional[int]): Take only every ``thin`` steps from the
chain. (default: ``1``)
discard (Optional[int]): Discard the first ``discard`` steps in
the chain as burn-in. (default: ``0``)
Returns:
array[..., nwalkers]: The chain of log priors.
"""
return self.get_value("log_prior", **kwargs)
def get_log_likelihood(self, **kwargs):
"""Get the chain of log likelihoods evaluated at the MCMC samples.
Args:
flat (Optional[bool]): Flatten the chain across the ensemble.
(default: ``False``)
thin (Optional[int]): Take only every ``thin`` steps from the
chain. (default: ``1``)
discard (Optional[int]): Discard the first ``discard`` steps in
the chain as burn-in. (default: ``0``)
Returns:
array[..., nwalkers]: The chain of log likelihoods.
"""
return self.get_value("log_likelihood", **kwargs)
def get_log_probability(self, **kwargs):
"""Get the chain of log probabilities evaluated at the MCMC samples.
Args:
flat (Optional[bool]): Flatten the chain across the ensemble.
(default: ``False``)
thin (Optional[int]): Take only every ``thin`` steps from the
chain. (default: ``1``)
discard (Optional[int]): Discard the first ``discard`` steps in
the chain as burn-in. (default: ``0``)
Returns:
array[..., nwalkers]: The chain of log probabilities.
"""
return (
self.get_value("log_prior", **kwargs) +
self.get_value("log_likelihood", **kwargs)
)
def get_integrated_autocorr_time(self, **kwargs):
"""Get the integrated autocorrelation time for each dimension.
Any arguments are passed directly to :func:`autocorr.integrated_time`.
Returns:
array[ndim]: The estimated autocorrelation time in each dimension.
"""
return integrated_time(np.mean(self.get_value("coords"), axis=1),
**kwargs)
def get_value(self, name, flat=False, thin=1, discard=0):
v = self[name, discard::thin]
if flat:
s = list(v.shape[1:])
s[0] = np.prod(v.shape[:2])
return v.reshape(s)
return v
@property
def acceptance(self):
return self._acceptance
@property
def acceptance_fraction(self):
return self.acceptance / float(self.niter)
@property
def current_coords(self):
if self.niter <= 0:
raise AttributeError("You need to run the chain first or store "
"the chain using the 'store' keyword "
"argument to Sampler.sample")
return self._data["coords"][self.niter-1]
@property
def coords(self):
return self.get_coords()
@property
def log_prior(self):
return self.get_log_prior()
@property
def log_likelihood(self):
return self.get_log_likelihood()
@property
def log_probability(self):
return self.get_log_probability()
@property
def random_state(self):
return self._random_state
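# A small self-contained sketch (illustrative only, with made-up shapes) of how the
# ``flat``, ``thin`` and ``discard`` options of ``get_value`` reshape the stored
# chain; it mirrors the slicing and reshaping done above using plain numpy.
if __name__ == "__main__":
    chain = np.zeros((100, 4, 3))            # (niter, nwalkers, ndim)
    discard, thin = 10, 5
    v = chain[discard::thin]                 # drop burn-in, then thin the chain
    flat = v.reshape((v.shape[0] * v.shape[1],) + v.shape[2:])
    print(v.shape, flat.shape)               # (18, 4, 3) (72, 3)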
|
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
package_data = []
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
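# For example (illustrative call; these paths mirror the ones used later in this script):
#   package_path(join(ROOT, 'bokeh', 'sampledata'), ('.csv', '.json'))
# walks the sampledata tree and records only files with those suffixes, with each
# path stored relative to the 'bokeh' package directory.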
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
blddir = join("bokehjs", "build")
bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % bkjs_size)
print(" - bokeh.css : %6.1f KB" % bkcss_size)
print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
dir_util.remove_tree('build/lib/bokeh')
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
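# Illustrative invocations only (the option names are the ones handled by
# parse_jsargs above; nothing here is executed):
#
#   python setup.py install --build_js      # build BokehJS with gulp, then install it
#   python setup.py develop --install_js    # reuse the previously built BokehJS
#   python setup.py --install_js            # developer shortcut: only copy JS/CSS assets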
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, 'templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
package_path(join(SERVER, 'tests', 'config'))
package_path(join(SERVER, 'tests', 'data'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'Flask>=0.10.1',
'Jinja2>=2.7',
'MarkupSafe>=0.18',
'Werkzeug>=0.9.1',
'greenlet>=0.4.1',
'itsdangerous>=0.21',
'python-dateutil>=2.1',
'requests>=1.2.3',
'six>=1.5.2',
'pygments>=1.6',
'pystache>=0.5.3',
'markdown>=2.3.1',
'PyYAML>=3.10',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
# cli
# 'click>=3.3',
# tests
# 'nose>=1.3.0',
# 'mock>=1.0.1',
'colorama>=0.2.7'
]
if sys.version_info[:2] == (2, 6):
REQUIRES.append('argparse>=1.1')
# if sys.platform != "win32":
# REQUIRES.append('redis>=2.7.6')
if platform.python_implementation() != "PyPy":
# You need to install PyPy's fork of NumPy to make it work:
# pip install git+https://bitbucket.org/pypy/numpy.git
# Also, pandas is not yet working with PyPy.
REQUIRES.extend([
'numpy>=1.7.1',
'pandas>=0.11.0'
])
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builder',
'bokeh.charts.builder.tests',
'bokeh.charts.tests',
'bokeh.crossfilter',
'bokeh.mplexporter',
'bokeh.mplexporter.renderers',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.views',
'bokeh.server.blaze',
'bokeh.server.utils',
'bokeh.server.tests',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
|
|
'''
The BookController module defines the BookController class. The BookController interfaces with
the BookRepository and the UI, as we use an MVC structure in this program.
'''
from undo.Operation import *
from sorting.filter import *
from undo.Undo import Undo
from controller.controller import Controller
from domain.BookException import BookException
from domain.Book import Book
from copy import deepcopy
class BookController(Controller):
'''
Class BookController interfaces with the UI and performs actions on the BookRepository using methods that
safely validate all input.
Every BookController object has a _repo property in which it holds a BookRepository type object. This object
contains a list of all Book items and performs operations on this list of books through the BookController.
'''
def __init__(self, repo):
'''
Constructor for the BookController object. This constructor defines the _repo property. The idea behind the
controller is the following:
There are methods that perform operations on the _repo property of a controller object, in order to avoid
direct manipulation of the repository from the UI class.
_repo: a Repository type object that is handled by the methods defined in this class.
'''
self._repo = repo
self._undoController = None
def getRepo(self):
return self._repo
def loadFile(self, filePath, fileValidator):
'''
Loads file into the repository contained by this class.
'''
if self._repo.loadFile(filePath, fileValidator) == False:
return False
return True
def addUndoController(self, undoController):
'''
Adds undo controller.
'''
self._undoController = undoController
def searchById(self, _id):
'''
This method looks inside the repository for a book that exists with the same id as _id and passes it back
to the user interface.
Input:
self - object defined by this class
_id - integer
Output:
Book - Book type object
False - if no book is found
'''
if self._repo.findId(_id) != False:
return self._repo.elementFromId(_id)
return False
def searchByTitle(self, titleToSearch):
'''
Method filters the repository's element list for books whose title matches titleToSearch and
returns either False for no results, a single Book object for one result, or a list of Books
for multiple results.
Input:
self - object defined by this class
titleToSearch - string
Output:
Book or list of Books for matching results
False for no results
'''
rlist = []
result = self._repo.getElementList()
rlist = filterList(result, lambda x: x.getTitle() == titleToSearch)
if rlist != []:
if len(rlist) == 1:
return rlist[0]
else:
return rlist
else:
return False
def modifyBookAuthor(self, bookElement, newAuthor):
'''
Searches repository for corresponding bookElement and replaces its author. This is done by first removing the entry
from the repository, constructing a new one and adding it.
Input:
self - object defined by this class
bookElement - Book type object
newAuthor - string
Output:
True/False
'''
if self._repo.findId(bookElement.getId()) == False:
return False
newBook = Book(bookElement.getId(), bookElement.getTitle(), bookElement.getDescription(), newAuthor)
self._repo.removeElement(bookElement)
self._repo.addElement(newBook)
newOperation = ModifyOperation(self._repo, bookElement, newBook)
self._undoController._addOperation(newOperation)
return True
def modifyBookTitle(self, bookElement, newTitle):
'''
Searches repository for corresponding bookElement and replaces its title. This is done by first removing the entry
from the repository, constructing a new one and adding it.
Input:
self - object defined by this class
bookElement - Book type object
newTitle - string
Output:
True/False
'''
if self._repo.findId(bookElement.getId()) == False:
return False
newBook = Book(bookElement.getId(), newTitle, bookElement.getDescription(), bookElement.getAuthor())
self._repo.removeElement(bookElement)
self._repo.addElement(newBook)
newOperation = ModifyOperation(self._repo, bookElement, newBook)
self._undoController._addOperation(newOperation)
return True
def modifyNumberOf(self, _id, num):
'''
Modifies number of books with ID == _id
Input:
self - object defined by this class
_id - integer
num - new number of copies of the book with id == _id
Output:
True if book with id == _id was found and modified.
False if book with id == _id was not found.
'''
books = self._repo.getElementList()
for i in books:
if i.getId() == _id:
self._repo.setNumberOf(_id, num)
return True
return False
def removeElement(self, bookElement):
'''
Method passes bookElement to the repository and asks for removal.
Input:
self - object defined by this class
bookElement - Book type object
'''
self._repo.removeElement(bookElement)
newOperation = RemoveOperation(self._repo, bookElement)
self._undoController._addOperation(newOperation)
return True
def addBook(self, book):
'''
Adds a book to repository.
Input:
book - book type object
Output:
True/False
'''
if not self.checkIdExists(book.getId()):
try:
self._repo.addElement(book)
newOperation = AddOperation(self._repo, book)
self._undoController._addOperation(newOperation)
return True
except BookException as e:
return e
else:
return False
def checkIdExists(self, _id):
'''
Checks whether a book with id == _id already exists in the repository.
Input:
self - object defined by this class
_id - integer
Output:
False if no book with this id exists
Raises:
BookException if a book with this id already exists
'''
if self.findExistingId(_id) != False:
raise BookException("Id already exists!")
return False
def findExistingId(self, _id):
'''
Returns true if a book in repository has book.id() == _id.
Input:
self - object defined by this class
_id - integer
Output:
True / False
'''
if self.searchById(_id) != False:
return True
return False
def getAllBooks(self):
'''
Getter for bookElements in repository.
Output:
list of Book type objects
'''
return self._repo.getElementList()
def getAllNumbers(self):
'''
Getter for number of books, used for rental management.
Output:
dictionary of _id -> number
'''
return self._repo.getNumberList()
def getNumberOf(self, _id):
'''
Returns the number of copies of book with id == _id inside the repository.
Input:
self - object defined by this class
_id - integer
Output:
nlist[_id] - integer found in dictionary _id -> number of copies
'''
nlist = self._repo.getNumberList()
return nlist[_id]
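# Illustrative wiring only. BookRepository is assumed to live elsewhere in this
# project and its constructor is not verified here; the Book signature matches the
# calls above (id, title, description, author) and Undo's arguments are unverified.
#
#   repo = BookRepository()
#   undoController = Undo(...)
#   controller = BookController(repo)
#   controller.addUndoController(undoController)
#   controller.addBook(Book(1, "Dune", "A desert planet epic", "Frank Herbert"))
#   controller.searchByTitle("Dune")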
|
|
from bisect import bisect
import gtk
import gobject
from gtk.gdk import Rectangle, CONTROL_MASK, SHIFT_MASK
from gtk import keysyms
from uxie.utils import send_focus_change
icon_sizes = None
class DrawItem(object):
__slots__ = ['ix', 'iy', 'iwidth', 'iheight',
'tx', 'ty', 'twidth', 'theight', 'width', 'height', 'x', 'y']
def __init__(self, view, icell, tcell):
global icon_sizes
if not icon_sizes:
icon_sizes = icell.get_size(view)  # cache the cell size in the module-level icon_sizes
self.ix, self.iy, self.iwidth, self.iheight = icon_sizes
self.tx, self.ty, self.twidth, self.theight = tcell.get_size(view)
self.tx += self.ix + self.iwidth
if self.theight > self.iheight:
self.height = self.theight
self.iy += (self.theight - self.iheight) / 2
else:  # also covers theight == iheight, so self.height is always set
self.height = self.iheight
self.ty += (self.iheight - self.theight) / 2
self.width = self.tx + self.twidth
class FmdIconView(gtk.EventBox):
__gsignals__ = {
"expose-event": "override",
"realize": "override",
"size-request": "override",
"size-allocate": "override",
"key-press-event": "override",
"set-scroll-adjustments": (
gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_ACTION,
gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment)
),
"item-activated": (
gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_ACTION,
gobject.TYPE_NONE, (object,)
),
}
def __init__(self):
gtk.EventBox.__init__(self)
self.set_can_focus(True)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.KEY_PRESS_MASK)
self.set_set_scroll_adjustments_signal("set-scroll-adjustments")
self.model = None
self.icon_renderer = None
self.text_renderer = None
self.cell_attrs = {}
self.item_cache = {}
self.prev_allocation = None
self.columns = []
self.column_first_item = {}
self.cursor = None
self.item_draw_queue = []
self.needed_full_redraw = False
def set_attributes(self, cell, **kwargs):
self.cell_attrs[cell] = kwargs
def _prepare_cell(self, cell, row):
for k, v in self.cell_attrs.get(cell, {}).items():
cell.set_property(k, row[v])
def unselect_all(self):
if self.model:
for path in self.model.selection:
self._queue_path_draw(path)
self.model.clear_selection()
def set_cursor(self, path, select=True, select_between=False):
prev = self.cursor
self.cursor = path
if self.model:
if select:
self.unselect_all()
self.model.select(path)
if self.cursor not in self.item_cache:
return
if prev:
self._queue_path_draw(prev)
if select_between:
cursor = self.cursor
remove_selection = self.model.is_selected(cursor) and self.model.is_selected(prev)
if prev > self.cursor:
prev, cursor = cursor, prev
for path in self._foreach_path(prev, cursor):
if remove_selection and path != self.cursor:
self.model.unselect(path)
else:
self.model.select(path)
self._queue_path_draw(path)
self._queue_path_draw(self.cursor)
self.scroll_to_path(self.cursor)
def get_cursor(self):
return self.cursor
def start_editing(self, path):
event = gtk.gdk.Event(gtk.gdk.NOTHING)
item = self.item_cache[path]
xoffset = int(self._hadj.value)
area = Rectangle(item.x + item.tx - xoffset, item.y + item.ty, item.twidth, item.theight)
path = ','.join(map(str, path))
entry = self.text_renderer.start_editing(event, self, path, area, area, 0)
entry.start_editing(event)
window = gtk.Window(gtk.WINDOW_POPUP)
window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_UTILITY)
window.add(entry)
entry.show()
entry.realize()
entry.size_allocate(area)
win = self.window
window.window.reparent(win, 0, 0)
entry.size_allocate(area)
window.resize(item.twidth, item.theight)
window.move(item.x + item.tx - xoffset, item.y + item.ty)
window.show()
send_focus_change(entry, True)
return entry
def _draw_item(self, item, row, xoffset, earea):
flags = 0
if self.model.is_selected(row.path):
flags = gtk.CELL_RENDERER_SELECTED
self.style.paint_flat_box(self.window, gtk.STATE_SELECTED, gtk.SHADOW_NONE,
earea, self, 'fmd icon text', item.x + item.tx - xoffset, item.y + item.ty,
item.twidth, item.theight)
self._prepare_cell(self.icon_renderer, row)
area = Rectangle(item.x + item.ix - xoffset, item.y + item.iy, item.iwidth, item.iheight)
self.icon_renderer.render(self.window, self, area, area, earea, flags)
self._prepare_cell(self.text_renderer, row)
area = Rectangle(item.x + item.tx - xoffset, item.y + item.ty, item.twidth, item.theight)
self.text_renderer.render(self.window, self, area, area, earea, flags)
if row.path == self.cursor:
self.style.paint_focus(self.window, gtk.STATE_NORMAL,
earea, self, 'fmd icon text focus', item.x + item.tx - xoffset, item.y + item.ty,
item.twidth, item.theight)
def do_expose_event(self, event):
if not self.model:
return True
earea = event.area
xoffset = int(self._hadj.value)
margin = self.style_get_property('margin')
if not self.needed_full_redraw and self.item_draw_queue:
processed = {}
while self.item_draw_queue:
path, item = self.item_draw_queue.pop(0)
if path in processed: continue
self._draw_item(item, self.model[path], xoffset, earea)
processed[path] = True
else:
self.item_draw_queue[:] = []
self.needed_full_redraw = False
idx = bisect(self.columns, xoffset + margin) - 1
for path in self._foreach_path(self.column_first_item[self.columns[idx]]):
r = self.model[path]
item = self.item_cache[r.path]
if item.x - xoffset > earea.width:
break
self._draw_item(item, r, xoffset, earea)
return True
def do_size_request(self, req):
if self.model:
req.width = 500
req.height = 500
def do_size_allocate(self, allocation):
self.allocation = allocation
if self.flags() & gtk.REALIZED:
self.window.move_resize(*allocation)
if allocation != self.prev_allocation:
self.prev_allocation = allocation
self.update_item_cache()
def do_set_scroll_adjustments(self, h_adjustment, v_adjustment):
if h_adjustment:
self._hscroll_handler_id = h_adjustment.connect(
"value-changed", self.hscroll_value_changed)
self._hadj = h_adjustment
def hscroll_value_changed(self, *args):
self.needed_full_redraw = True
self.queue_draw()
def set_model(self, model):
self.model = model
self.update_item_cache()
def update_item_cache(self):
self.item_cache.clear()
self.columns[:] = []
if not self.model:
return
hs = self.style_get_property('hspacing')
vs = self.style_get_property('vspacing')
margin = self.style_get_property('margin')
x = y = margin
maxy = self.allocation.height - margin
mx = 0
self.columns.append(x)
self.column_first_item[x] = (0,)
for r in self.model:
self._prepare_cell(self.icon_renderer, r)
self._prepare_cell(self.text_renderer, r)
item = self.item_cache[r.path] = DrawItem(self, self.icon_renderer, self.text_renderer)
ny = y + item.height + vs
if ny > maxy:
x += mx + hs
self.columns.append(x)
self.column_first_item[x] = r.path
mx = 0
y = margin
ny = y + item.height + vs
if item.width > mx:
mx = item.width
item.x = x
item.y = y
y = ny
self._hadj.configure(0, 0, x+mx, self.allocation.width*0.1, self.allocation.width*0.9,
self.allocation.width)
def do_realize(self):
gtk.DrawingArea.do_realize(self)
self.window.set_background(self.style.base[gtk.STATE_NORMAL])
def _queue_path_draw(self, path):
try:
item = self.item_cache[path]
except KeyError:
return
self.item_draw_queue.append((path, item))
xoffset = int(self._hadj.value)
self.window.invalidate_rect(Rectangle(item.x - xoffset, item.y,
item.width, item.height), False)
def _foreach_path(self, fpath, tpath=None):
tpath = tpath or (len(self.model)-1,)
return ((r,) for r in xrange(fpath[0], tpath[0]+1))
def _find_nearest_path_on_same_line(self, path, direction):
item = self.item_cache[path]
idx = bisect(self.columns, item.x) + direction - 1
if idx < 0:
return 0,
elif idx >= len(self.columns):
return len(self.model) - 1,
path = self.column_first_item[self.columns[idx]]
rpath = None
dy = 0
for path in self._foreach_path(path):
it = self.item_cache[path]
ndy = abs(it.y - item.y)
if ndy == 0:
return path
if rpath and ndy > dy:
return rpath
rpath = path
dy = ndy
return path
def scroll_to_path(self, path, align=None):
item = self.item_cache[path]
maxx = self.allocation.width
xoffset = int(self._hadj.value)
margin = self.style_get_property('margin')
x1 = item.x - xoffset - margin
x2 = x1 + item.width
if align is None:
if 0 <= x1 <= maxx and 0 <= x2 <= maxx:
return
elif x1 < 0:
dx = x1
elif x2 > maxx:
dx = min(x1, x2 - maxx)
else:
dx = 0
self._hadj.value = max(0, xoffset + dx)
def do_key_press_event(self, event):
keyval = event.keyval
state = event.state
if state | SHIFT_MASK | CONTROL_MASK == SHIFT_MASK | CONTROL_MASK:
do_select_between = state == SHIFT_MASK
do_select = not do_select_between and state != CONTROL_MASK
if keyval == keysyms.Down:
if not self.cursor:
self.set_cursor((0,))
elif self.cursor[0] + 1 < len(self.model):
self.set_cursor((self.cursor[0] + 1,), do_select, do_select_between)
return True
if keyval == keysyms.Up:
if self.cursor and self.cursor[0] > 0:
self.set_cursor((self.cursor[0] - 1,), do_select, do_select_between)
return True
if keyval == keysyms.Right:
if not self.cursor:
self.set_cursor((0,))
else:
cursor = self._find_nearest_path_on_same_line(self.cursor, 1)
if cursor:
self.set_cursor(cursor, do_select, do_select_between)
return True
if keyval == keysyms.Left:
if self.cursor:
cursor = self._find_nearest_path_on_same_line(self.cursor, -1)
if cursor:
self.set_cursor(cursor, do_select, do_select_between)
return True
if keyval == keysyms.Return and not state:
if self.cursor:
self.emit('item-activated', self.cursor)
return True
if keyval == keysyms.space and state == CONTROL_MASK:
if self.cursor:
self.model.invert_selection(self.cursor)
self._queue_path_draw(self.cursor)
return True
return False
def refresh(self, full=True):
if full:
self.update_item_cache()
self.needed_full_redraw = True
self.queue_draw()
gobject.type_register(FmdIconView)
gtk.widget_class_install_style_property(FmdIconView, ('hspacing', gobject.TYPE_INT,
'Horizontal spacing', 'Horizontal spacing between items', gobject.G_MININT, gobject.G_MAXINT,
10, gobject.PARAM_READWRITE))
gtk.widget_class_install_style_property(FmdIconView, ('vspacing', gobject.TYPE_INT,
'Vertical spacing', 'Vertical spacing between items', gobject.G_MININT, gobject.G_MAXINT,
2, gobject.PARAM_READWRITE))
gtk.widget_class_install_style_property(FmdIconView, ('margin', gobject.TYPE_INT,
'Margin', 'Margin to view boundaries', gobject.G_MININT, gobject.G_MAXINT,
3, gobject.PARAM_READWRITE))
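# Illustrative setup only; the model object and its selection API (selection,
# select, unselect, is_selected, clear_selection, invert_selection) are assumptions
# taken from the calls above, not a verified interface.
#
#   view = FmdIconView()
#   view.icon_renderer = gtk.CellRendererPixbuf()
#   view.text_renderer = gtk.CellRendererText()
#   view.set_attributes(view.icon_renderer, pixbuf=0)  # column 0 -> 'pixbuf' property
#   view.set_attributes(view.text_renderer, text=1)    # column 1 -> 'text' property
#   view.set_model(model)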
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from bson.objectid import ObjectId
import functools
import six
import sys
import traceback
import dicom
import numpy as np
from girder import events
from girder.plugins.jobs.constants import JobStatus
from girder.utility.model_importer import ModelImporter
from PIL import Image
def run(job):
jobModel = ModelImporter.model('job', 'jobs')
jobModel.updateJob(job, status=JobStatus.RUNNING)
try:
newFile = createThumbnail(**job['kwargs'])
log = 'Created thumbnail file %s.' % newFile['_id']
jobModel.updateJob(job, status=JobStatus.SUCCESS, log=log)
except Exception:
t, val, tb = sys.exc_info()
log = '%s: %s\n%s' % (t.__name__, repr(val), traceback.extract_tb(tb))
jobModel.updateJob(job, status=JobStatus.ERROR, log=log)
raise
def createThumbnail(width, height, crop, fileId, attachToType, attachToId):
"""
Creates the thumbnail. Validation and access control must be done prior
to the invocation of this method.
"""
fileModel = ModelImporter.model('file')
file = fileModel.load(fileId, force=True)
streamFn = functools.partial(fileModel.download, file, headers=False)
event = events.trigger('thumbnails.create', info={
'file': file,
'width': width,
'height': height,
'crop': crop,
'attachToType': attachToType,
'attachToId': attachToId,
'streamFn': streamFn
})
if len(event.responses):
resp = event.responses[-1]
newFile = resp['file']
if event.defaultPrevented:
if resp.get('attach', True):
newFile = attachThumbnail(
file, newFile, attachToType, attachToId, width, height)
return newFile
else:
file = newFile
streamFn = functools.partial(
fileModel.download, file, headers=False)
if 'assetstoreId' not in file:
# TODO we could thumbnail link files if we really wanted.
raise Exception('File %s has no assetstore.' % fileId)
stream = streamFn()
data = b''.join(stream())
image = _getImage(file['mimeType'], file['exts'], data)
if not width:
width = int(height * image.size[0] / image.size[1])
elif not height:
height = int(width * image.size[1] / image.size[0])
elif crop:
x1 = y1 = 0
x2, y2 = image.size
wr = float(image.size[0]) / width
hr = float(image.size[1]) / height
if hr > wr:
y1 = int(y2 / 2 - height * wr / 2)
y2 = int(y2 / 2 + height * wr / 2)
else:
x1 = int(x2 / 2 - width * hr / 2)
x2 = int(x2 / 2 + width * hr / 2)
image = image.crop((x1, y1, x2, y2))
image.thumbnail((width, height), Image.ANTIALIAS)
uploadModel = ModelImporter.model('upload')
out = six.BytesIO()
image.convert('RGB').save(out, 'JPEG', quality=85)
size = out.tell()
out.seek(0)
thumbnail = uploadModel.uploadFromFile(
out, size=size, name='_thumb.jpg', parentType=attachToType,
parent={'_id': ObjectId(attachToId)}, user=None, mimeType='image/jpeg',
attachParent=True)
return attachThumbnail(
file, thumbnail, attachToType, attachToId, width, height)
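# Worked example (illustrative numbers) of the crop branch above: for a 400x300
# source and a requested 100x100 crop, wr = 4.0 and hr = 3.0, so hr > wr is false
# and the horizontal branch runs: x1 = 200 - 100*3.0/2 = 50 and
# x2 = 200 + 100*3.0/2 = 350. The centered 300x300 region (50, 0, 350, 300) is
# cropped first and then thumbnailed down to 100x100.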
def attachThumbnail(file, thumbnail, attachToType, attachToId, width, height):
"""
Add the required information to the thumbnail file and the resource it
is being attached to, and save the documents.
:param file: The file from which the thumbnail was derived.
:type file: dict
:param thumbnail: The newly generated thumbnail file document.
:type thumbnail: dict
:param attachToType: The type to which the thumbnail is being attached.
:type attachToType: str
:param attachToId: The ID of the document to attach the thumbnail to.
:type attachToId: str or ObjectId
:param width: Thumbnail width.
:type width: int
:param height: Thumbnail height.
:type height: int
:returns: The updated thumbnail file document.
"""
parentModel = ModelImporter.model(attachToType)
parent = parentModel.load(attachToId, force=True)
parent['_thumbnails'] = parent.get('_thumbnails', [])
parent['_thumbnails'].append(thumbnail['_id'])
parentModel.save(parent)
thumbnail['attachedToType'] = attachToType
thumbnail['attachedToId'] = parent['_id']
thumbnail['isThumbnail'] = True
thumbnail['derivedFrom'] = {
'type': 'file',
'id': file['_id'],
'process': 'thumbnail',
'width': width,
'height': height
}
return ModelImporter.model('file').save(thumbnail)
def _getImage(mimeType, extension, data):
"""
Check the MIME type and extension of the image and open it.
:param mimeType: The MIME type of the file.
:param extension: The list of extensions of the image that needs to be opened.
:param data: The image file stream.
"""
if (extension and extension[-1] == 'dcm') or mimeType == 'application/dicom':
# Open the dicom image
dicomData = dicom.read_file(six.BytesIO(data))
return scaleDicomLevels(dicomData)
else:
# Open other types of images
return Image.open(six.BytesIO(data))
def scaleDicomLevels(dicomData):
"""
Adjust dicom levels so image is viewable.
:param dicomData: The image data to be processed.
"""
offset = dicomData.RescaleIntercept
imageData = dicomData.pixel_array
if len(imageData.shape) == 3:
minimum = imageData[0].min() + offset
maximum = imageData[0].max() + offset
finalImage = _scaleIntensity(imageData[0], maximum-minimum, (maximum+minimum)/2)
return Image.fromarray(finalImage).convert("I")
else:
minimum = imageData.min() + offset
maximum = imageData.max() + offset
finalImage = _scaleIntensity(imageData, maximum-minimum, (maximum+minimum)/2)
return Image.fromarray(finalImage).convert("I")
def _scaleIntensity(img, window, level, maxc=255):
"""Change window and level data in image.
:param img: numpy array representing an image
:param window: the window for the transformation
:param level: the level for the transformation
:param maxc: what the maximum display color is
"""
m = maxc/(2.0*window)
o = m*(level-window)
return np.clip((m*img-o), 0, maxc).astype(np.uint8)
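# A small self-contained check (illustrative only) of the window/level mapping
# above: with window=100 and level=50, an input at level-window maps to 0, the
# level itself maps to the middle of the range, and level+window maps to maxc.
if __name__ == "__main__":
    demo = np.array([-50.0, 50.0, 150.0])
    print(_scaleIntensity(demo, window=100, level=50))  # -> [  0 127 255]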
|
|
#
# Copyright 2010-2019 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Globus flavor of OAuth2. They require HTTP Basic authentication for token requests, and also provide group management.
"""
import base64
import urllib.parse
import urllib.request
import json
import web
from .providers import *
from ..util import *
from . import database
from . import oauth2
USE_GLOBUS_SDK = False
try:
import globus_sdk
USE_GLOBUS_SDK = True
except ImportError:
pass
__all__ = [
'GlobusAuthClientProvider',
'config_built_ins'
]
class GlobusAuth (database.DatabaseConnection2):
# this is the storage format version, not the software version
major = 2
minor = 0
def __init__(self, config):
database.DatabaseConnection2.__init__(self, config)
class GlobusGroupTokenProcessor(oauth2.GroupTokenProcessor):
default_accepted_roles=['admin', 'manager', 'member']
def __init__(self, issuer, expected_scopes, group_base_url, accepted_roles=None):
oauth2.GroupTokenProcessor.__init__(self, expected_scopes)
self.issuer = issuer
self.accepted_roles = accepted_roles if accepted_roles else self.default_accepted_roles
self.group_base_url = group_base_url
self.token = None
def set_token(self, token):
self.token = token
def get_raw_groups(self, group_request):
group_request.add_header('Authorization', 'Bearer ' + self.token.get('access_token'))
u = oauth2.OAuth2Login.open_url(group_request, "getting groups")
raw_groups = json.load(u)
u.close()
return(raw_groups)
def get_groups(self):
raise NotImplementedError()
def make_group(self, id, name):
return KeyedDict({ID : self.issuer + "/" + id,
DISPLAY_NAME : name})
class GlobusViewGroupTokenProcessor(GlobusGroupTokenProcessor):
default_base_url = "https://groups.api.globus.org/v2/groups/my_groups"
def __init__(self, issuer, group_base_url=None):
GlobusGroupTokenProcessor.__init__(self, issuer,
["urn:globus:auth:scope:groups.api.globus.org:view_my_groups_and_memberships"],
group_base_url if group_base_url else self.default_base_url)
def get_groups(self):
# web.debug("trying view_my_groups, token is {t}".format(t=str(self.token)))
final_groups = set()
if self.token != None:
group_request = urllib.request.Request(self.group_base_url)
raw_groups = self.get_raw_groups(group_request)
for g in raw_groups:
# Unlike the old API, this will only return
# "groups in which the user is an active member, manager, or admin"
# so no need to descend into memberships and check status/role
final_groups.add(self.make_group(g["id"], g.get("name")))
return final_groups
class GlobusLegacyGroupTokenProcessor(GlobusGroupTokenProcessor):
default_base_url="https://nexus.api.globusonline.org/groups"
def __init__(self, issuer, group_base_url=None, accepted_roles=None):
GlobusGroupTokenProcessor.__init__(self, issuer,
["urn:globus:auth:scope:nexus.api.globus.org:groups"],
group_base_url if group_base_url else self.default_base_url,
accepted_roles=accepted_roles)
self.group_args = {
'include_identity_set_properties' : 'true',
'my_roles' : ','.join(self.accepted_roles),
'my_statuses' : 'active',
'for_all_identities' : 'true'
}
def get_groups(self):
# web.debug("Using legacy Globus group processor")
final_groups = set()
if self.token != None:
urltuple = urllib.parse.urlsplit(self.group_base_url)
group_request = urllib.request.Request(urllib.parse.urlunsplit([urltuple[0], urltuple[1], urltuple[2], urllib.parse.urlencode(self.group_args), None]))
raw_groups = self.get_raw_groups(group_request)
for g in raw_groups:
group = self.make_group(g["id"], g.get("name"))
if g["my_status"] == "active":
final_groups.add(group)
else:
idprops = g.get("identity_set_properties")
if idprops != None:
for props in idprops.values():
if props.get("role") in self.accepted_roles and props.get("status") == "active":
final_groups.add(group)
break
return(final_groups)
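# Illustrative payload shapes only (field names are the ones read by the two
# processors above; ids, names and roles are invented):
#
#   view_my_groups endpoint:
#       [{"id": "1234", "name": "Example Group"}, ...]
#   legacy nexus endpoint:
#       [{"id": "1234", "name": "Example Group", "my_status": "invited",
#         "identity_set_properties": {"some-identity-id": {"role": "member",
#                                                          "status": "active"}}}, ...]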
class GlobusAuthLogin(oauth2.OAuth2Login):
def login(self, manager, context, db, **kwargs):
user_id = oauth2.OAuth2Login.login(self, manager, context, db, **kwargs)
other_tokens = self.payload.get('other_tokens')
dependent_tokens = self.payload.get('dependent_tokens')
dependent_tokens_source = self.payload.get('dependent_tokens_source')
group_base = self.provider.cfg.get('globus_auth_group_endpoint')
group_token_processor = None
context.globus_identities = set()
context.globus_identities.add(user_id)
identity_set = self.userinfo.get('identities_set')
issuer = self.userinfo.get('iss')
all_group_processors = [
GlobusViewGroupTokenProcessor(group_base_url=group_base, issuer=issuer),
GlobusLegacyGroupTokenProcessor(group_base_url=group_base, issuer=issuer)
]
context.client[IDENTITIES] = []
if identity_set != None:
for id in identity_set:
full_id = issuer + '/' + id
context.globus_identities.add(KeyedDict({ID : full_id}))
context.client[IDENTITIES].append(full_id)
if other_tokens != None:
for token in other_tokens:
self.add_to_wallet(context, issuer, token)
if group_token_processor is None:
for processor in all_group_processors:
if processor.token_recognized(token):
processor.set_token(token)
group_token_processor = processor
if dependent_tokens != None:
for token in dependent_tokens:
self.add_to_wallet(context, issuer, token)
if group_token_processor is None:
for processor in all_group_processors:
if processor.token_recognized(token):
processor.set_token(token)
group_token_processor = processor
# web.debug("wallet: " + str(context.wallet))
# web.debug("token processor: " + str(group_token_processor))
if group_token_processor is not None:
context.globus_groups = group_token_processor.get_groups()
self.provider.manage.update_last_login(manager, context, context.client[ID], db)
self.provider.manage.update_last_group_update(manager, context, context.client[ID], db)
return context.client
def add_extra_token_request_headers(self, token_request):
client_id = self.provider.cfg.get('client_id')
# web.debug("client id is {i}".format(i=client_id))
client_secret = self.provider.cfg.get('client_secret')
basic_auth_token = base64.b64encode((client_id + ':' + client_secret).encode())
token_request.add_header('Authorization', 'Basic ' + basic_auth_token.decode())
def make_userinfo_request(self, endpoint, access_token):
req = urllib.request.Request(endpoint, urllib.parse.urlencode({'token' : access_token, 'include' : 'identities_set'}).encode())
self.add_extra_token_request_headers(req)
return req
def payload_from_bearer_token(self, bearer_token, context, db):
oauth2.OAuth2Login.payload_from_bearer_token(self, bearer_token, context, db)
if USE_GLOBUS_SDK:
client = globus_sdk.ConfidentialAppAuthClient(self.provider.cfg.get('client_id'), self.provider.cfg.get('client_secret'))
# attempt to get dependent tokens
try:
# introspect_response = client.oauth2_token_introspect(bearer_token)
token_response = client.oauth2_get_dependent_tokens(bearer_token).data
if token_response != None and len(token_response) > 0:
self.payload['dependent_tokens_source'] = client.base_url
if self.payload['dependent_tokens_source'].endswith('/'):
self.payload['dependent_tokens_source'] = self.payload['dependent_tokens_source'][:-1]
if self.payload.get('dependent_tokens') == None:
self.payload['dependent_tokens'] = dict()
self.payload['dependent_tokens'] = token_response
except globus_sdk.exc.AuthAPIError as ex:
web.debug("WARNING: dependent token request returned {ex}".format(ex=ex))
else:
web.debug("WARNING: No globus_sdk installed; skipping dependent token request. This means no group info and an empty wallet for sessions authenticated by bearer token.")
# Sometimes Globus whitelist entries will have typos in the URLs ("//" instead of "/" is very common),
# and it can take a long time to get those fixed.
def my_uri(self):
override_uri = self.provider.cfg.get('globus_auth_override_full_redirect_uri')
if override_uri is not None and override_uri != '':
return override_uri
else:
return oauth2.OAuth2Login.my_uri(self)
class GlobusAuthClientProvider (oauth2.OAuth2ClientProvider):
key = 'globus_auth'
def __init__(self, config,
Login=GlobusAuthLogin,
Search=database.DatabaseClientSearch,
Manage=oauth2.OAuth2ClientManage,
Passwd=None):
oauth2.OAuth2ClientProvider.__init__(self, config, Login, Search, Manage, Passwd)
class GlobusAuthPreauthProvider (oauth2.OAuth2PreauthProvider):
key = 'globus_auth'
# Sometimes Globus whitelist entries will have typos in the URLs ("//" instead of "/" is very common),
# and it can take a long time to get those fixed.
def make_relative_uri(self, relative_uri):
override_uri = self.cfg.get('globus_auth_override_full_redirect_uri')
if override_uri is not None and override_uri != '':
return override_uri
else:
return oauth2.OAuth2PreauthProvider.make_relative_uri(self, relative_uri)
class GlobusAuthAttributeClient (AttributeClient):
def __init__(self, provider):
AttributeClient.__init__(self, provider)
def set_msg_context(self, manager, context, db=None):
if hasattr(context, 'globus_groups'):
context.attributes.update(group for group in context.globus_groups)
context.attributes.update(identity for identity in context.globus_identities)
class GlobusAuthAttributeProvider (database.DatabaseAttributeProvider):
"""
Globus groups and multiple identities
"""
key = 'globus_auth'
def __init__(self, config):
database.DatabaseAttributeProvider.__init__(self, config)
self.client = GlobusAuthAttributeClient(self)
class GlobusAuthSessionStateProvider(oauth2.OAuth2SessionStateProvider):
"""
OAuth2 session state plus Globus logout
"""
key = 'globus_auth'
def terminate(self, manager, context, db=None, preferred_final_url=None):
globus_args = ['client_id', 'redirect_name']
oauth2.OAuth2SessionStateProvider.terminate(self, manager, context, db)
logout_base = self.cfg.get('revocation_endpoint')
if logout_base == None:
raise oauth2.OAuth2ConfigurationError("No revocation endpoint configured")
rest_args = web.input()
args=dict()
for key in globus_args:
val=rest_args.get('logout_' + key)
if val == None:
val = self.cfg.get(self.key + '_logout_' + key)
if val != None:
args[key] = val
if preferred_final_url != None:
args['redirect_uri'] = preferred_final_url
globus_logout_url = logout_base + "?" + urllib.parse.urlencode(args)
retval = dict()
retval[LOGOUT_URL] = globus_logout_url
return retval
|
|
"""
Syslog server which allows handler methods to subscribe to syslog entries based
on regular expressions.
Author: Gregory Haynes <[email protected]> (2012)
"""
from multiprocessing import Pool, Queue, Process
from loggerglue.rfc5424 import SyslogEntry
from Queue import Empty as QueueEmpty
import asyncore
import socket
import os
import sys
import time
import pyparsing
import argparse
import sspps
import handler
import signal
import rsyslog_fix
import logwriter
import pwd
import grp
import daemon
class LogEntryHandlerMap(object):
def __init__(self, handlers=()):
self.handlers = handlers
def handlers_for(self, entry):
ret = []
for handler in self.handlers:
if handler.handles_entry(entry):
ret.append(handler)
return ret
class LogEntryWorker(object):
def __init__(self, work_queue, args, log_write_queue):
self.work_queue = work_queue
self.log_write_queue = log_write_queue
self.init_handler_map(args.handlersdir)
self.uid = args.workuser
self.gid = args.workgroup
@property
def runable(self):
return self.entryhandler_map != None
def init_handler_map(self, handlersdir):
self.plugin_loader = sspps.PluginLoader(handlersdir,
parent_class=handler.LogEntryHandler,
init_kwargs={'log_write_queue': self.log_write_queue})
try:
self.plugin_loader.load_all()
except OSError:
print 'Invalid plugin path \'%s\'.' % handlersdir
self.entryhandler_map = None
return None
self.entryhandler_map = LogEntryHandlerMap(self.plugin_loader.plugins)
def run(self):
if not self.runable:
print 'Process not runable, returning'
return False
# Drop privileges
os.setgroups([])
os.setgid(self.gid)
os.setuid(self.uid)
ppid = os.getppid()
while True:
try:
line = self.work_queue.get(timeout=0.5)
if not line:
print 'Parent process is asking us to exit'
return True
line = line.decode('utf-8').encode('ASCII', 'ignore')
except KeyboardInterrupt:
return False
except UnicodeDecodeError:
print 'Unicode Error, skipping entry'
continue
except QueueEmpty:
if os.getppid() != ppid:
return False
continue
try:
entry = SyslogEntry.from_line(line)
except pyparsing.ParseException:
continue
self.process_entry(entry)
def process_entry(self, entry):
handlers = self.entryhandler_map.handlers_for(entry)
for handler in handlers:
handler.trigger(entry)
def start_worker(work_queue, args, log_write_queue):
worker = LogEntryWorker(work_queue, args, log_write_queue)
return worker.run()
class SyslogClient(asyncore.dispatcher_with_send):
def __init__(self, sock, work_queue):
asyncore.dispatcher_with_send.__init__(self, sock)
self.work_queue = work_queue
self.buff = ''
def handle_read(self):
data = self.recv(1024)
if data:
self.buff += data
lines = self.buff.split('\n')
self.buff = lines[-1]
for line in lines[:-1]:
start_pos = line.find('<')
if start_pos != -1:
line = line[start_pos:]
self.work_queue.put(line, block=False)
class SyslogServer(asyncore.dispatcher):
def __init__(self, address, work_queue):
asyncore.dispatcher.__init__(self)
self.work_queue = work_queue
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(address)
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is None:
pass
else:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
handler = SyslogClient(sock, self.work_queue)
def user_or_uid(arg):
try:
return int(arg)
except ValueError:
try:
return pwd.getpwnam(arg).pw_uid
except KeyError:
raise argparse.ArgumentTypeError('unknown user: %s' % arg)
def group_or_gid(arg):
try:
return int(arg)
except ValueError:
try:
return grp.getgrnam(arg).gr_gid
except KeyError:
raise argparse.ArgumentTypeError('unknown group: %s' % arg)
do_reload = False
def main():
# Argument parsing
parser = argparse.ArgumentParser(description='Framework to process syslog'\
' entries')
parser.add_argument('-n', '--numworkers',
help='Number of worker processes',
type=int,
default=4)
parser.add_argument('-w', '--workqueuesize',
help='Size of worker queue',
type=int,
default=100)
parser.add_argument('-q', '--workuser',
help='User for worker processes to run as',
type=user_or_uid,
default='nobody')
parser.add_argument('-r', '--workgroup',
help='Group for worker processes to run as',
type=group_or_gid,
default='nogroup')
parser.add_argument('-c', '--logqueuesize',
help='Size of log write queue',
type=int,
default=100)
parser.add_argument('-f', '--maxfds',
help='Maximum number of file descriptors to open',
type=int,
default=1020)
parser.add_argument('-d', '--logdir',
help='Root directory for log files',
type=str,
default='/var/log')
parser.add_argument('-u', '--loguser',
help='User for log writer to run as',
type=user_or_uid,
default='syslog')
parser.add_argument('-x', '--loggroup',
help='Group for log writer to run as',
type=group_or_gid,
default='syslog')
parser.add_argument('-p', '--port',
help='Syslog server port',
type=int,
default=6514)
parser.add_argument('-l', '--listen',
help='Syslog listen address',
type=str,
default='localhost')
parser.add_argument('-m', '--handlersdir',
help='Directory containing handler modules',
type=str,
default='/var/lib/syslogprocessor/handlers')
parser.add_argument('-D', '--daemonize',
help='Run as a daemon',
action="store_true")
args = parser.parse_args()
# Daemonize
if args.daemonize:
with daemon.DaemonContext():
daemon_main(args)
else:
daemon_main(args)
def daemon_main(args):
global do_reload
rsyslog_fix.fix()
# Create the work queue
work_queue = Queue(args.workqueuesize)
# log write queue
log_write_queue = Queue(args.logqueuesize)
# Start log writer process
log_writer = Process(target=logwriter.run_writer,
args=(log_write_queue, args))
log_writer.start()
# Our reload signal handler
def sigusr1_handler(signum, frame):
global do_reload
do_reload = True
signal.signal(signal.SIGUSR1, sigusr1_handler)
# Create the worker pool
pool = Pool(processes=args.numworkers,
initializer=start_worker,
initargs=(work_queue, args, log_write_queue))
server = SyslogServer((args.listen, args.port), work_queue)
try:
while True:
asyncore.loop(timeout=.2, count=1)
if do_reload:
print 'Starting reload'
# Cause children to exit
for i in range(args.numworkers):
work_queue.put(None)
# No more swimming
pool.close()
pool.join()
# Restart children
pool = Pool(processes=args.numworkers,
initializer=start_worker,
initargs=(work_queue, args, log_write_queue))
print 'Reload complete'
do_reload = False
except KeyboardInterrupt:
print 'ctrl+c detected, exiting.'
pool.close()
sys.exit(os.EX_OSERR)
except Exception, e:
print 'Error, closing the pool'
pool.close()
raise e
if __name__ == '__main__':
main()
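# Example invocation (the module filename is assumed; the flags map to the
# argparse options defined in main() above):
#
#     python syslogprocessor.py --listen 0.0.0.0 --port 6514 \
#         --numworkers 4 --handlersdir /var/lib/syslogprocessor/handlers \
#         --workuser nobody --workgroup nogroup --daemonize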
|
|
"""Support for OpenTherm Gateway devices."""
from datetime import date, datetime
import logging
import pyotgw
import pyotgw.vars as gw_vars
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as COMP_CLIMATE
from homeassistant.components.sensor import DOMAIN as COMP_SENSOR
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_DATE,
ATTR_ID,
ATTR_MODE,
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_DEVICE,
CONF_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import (
async_get_registry as async_get_dev_reg,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
ATTR_CH_OVRD,
ATTR_DHW_OVRD,
ATTR_GW_ID,
ATTR_LEVEL,
CONF_CLIMATE,
CONF_FLOOR_TEMP,
CONF_PRECISION,
CONF_READ_PRECISION,
CONF_SET_PRECISION,
DATA_GATEWAYS,
DATA_OPENTHERM_GW,
DOMAIN,
SERVICE_RESET_GATEWAY,
SERVICE_SET_CH_OVRD,
SERVICE_SET_CLOCK,
SERVICE_SET_CONTROL_SETPOINT,
SERVICE_SET_GPIO_MODE,
SERVICE_SET_HOT_WATER_OVRD,
SERVICE_SET_HOT_WATER_SETPOINT,
SERVICE_SET_LED_MODE,
SERVICE_SET_MAX_MOD,
SERVICE_SET_OAT,
SERVICE_SET_SB_TEMP,
)
_LOGGER = logging.getLogger(__name__)
CLIMATE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]
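# Example YAML configuration accepted by CONFIG_SCHEMA above. The top-level
# key is the integration domain from .const (shown here as "opentherm_gw");
# the option keys correspond to the CONF_* constants and the values are
# illustrative only:
#
#     opentherm_gw:
#       living_room:
#         device: /dev/ttyUSB0
#         name: Living room gateway
#         climate:
#           precision: 0.5
#           floor_temperature: true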
async def options_updated(hass, entry):
"""Handle options update."""
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
async_dispatcher_send(hass, gateway.options_update_signal, entry)
async def async_setup_entry(hass, config_entry):
"""Set up the OpenTherm Gateway component."""
if DATA_OPENTHERM_GW not in hass.data:
hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}}
gateway = OpenThermGatewayDevice(hass, config_entry)
hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway
if config_entry.options.get(CONF_PRECISION):
migrate_options = dict(config_entry.options)
migrate_options.update(
{
CONF_READ_PRECISION: config_entry.options[CONF_PRECISION],
CONF_SET_PRECISION: config_entry.options[CONF_PRECISION],
}
)
del migrate_options[CONF_PRECISION]
hass.config_entries.async_update_entry(config_entry, options=migrate_options)
config_entry.add_update_listener(options_updated)
# Schedule directly on the loop to avoid blocking HA startup.
hass.loop.create_task(gateway.connect_and_subscribe())
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
register_services(hass)
return True
async def async_setup(hass, config):
"""Set up the OpenTherm Gateway component."""
if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
conf = config[DOMAIN]
for device_id, device_config in conf.items():
device_config[CONF_ID] = device_id
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config
)
)
return True
def register_services(hass):
"""Register services for the component."""
service_reset_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
)
}
)
service_set_central_heating_ovrd_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_CH_OVRD): cv.boolean,
}
)
service_set_clock_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
# pylint: disable=unnecessary-lambda
vol.Optional(ATTR_DATE, default=lambda: date.today()): cv.date,
vol.Optional(ATTR_TIME, default=lambda: datetime.now().time()): cv.time,
}
)
service_set_control_setpoint_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=90)
),
}
)
service_set_hot_water_setpoint_schema = service_set_control_setpoint_schema
service_set_hot_water_ovrd_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_DHW_OVRD): vol.Any(
vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
),
}
)
service_set_gpio_mode_schema = vol.Schema(
vol.Any(
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("A"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=6)
),
}
),
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("B"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=7)
),
}
),
)
)
service_set_led_mode_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.In("ABCDEF"),
vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"),
}
)
service_set_max_mod_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_LEVEL): vol.All(
vol.Coerce(int), vol.Range(min=-1, max=100)
),
}
)
service_set_oat_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=-40, max=99)
),
}
)
service_set_sb_temp_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=30)
),
}
)
async def reset_gateway(call):
"""Reset the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
mode_rst = gw_vars.OTGW_MODE_RESET
status = await gw_dev.gateway.set_mode(mode_rst)
gw_dev.status = status
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema
)
async def set_ch_ovrd(call):
"""Set the central heating override on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
await gw_dev.gateway.set_ch_enable_bit(1 if call.data[ATTR_CH_OVRD] else 0)
hass.services.async_register(
DOMAIN,
SERVICE_SET_CH_OVRD,
set_ch_ovrd,
service_set_central_heating_ovrd_schema,
)
async def set_control_setpoint(call):
"""Set the control setpoint on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_CONTROL_SETPOINT
value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_CONTROL_SETPOINT,
set_control_setpoint,
service_set_control_setpoint_schema,
)
async def set_dhw_ovrd(call):
"""Set the domestic hot water override on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_DHW_OVRD
value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_HOT_WATER_OVRD,
set_dhw_ovrd,
service_set_hot_water_ovrd_schema,
)
async def set_dhw_setpoint(call):
"""Set the domestic hot water setpoint on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_DHW_SETPOINT
value = await gw_dev.gateway.set_dhw_setpoint(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_HOT_WATER_SETPOINT,
set_dhw_setpoint,
service_set_hot_water_setpoint_schema,
)
async def set_device_clock(call):
"""Set the clock on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
attr_date = call.data[ATTR_DATE]
attr_time = call.data[ATTR_TIME]
await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time))
hass.services.async_register(
DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema
)
async def set_gpio_mode(call):
"""Set the OpenTherm Gateway GPIO modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gpio_id = call.data[ATTR_ID]
gpio_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode)
gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
gw_dev.status.update({gpio_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema
)
async def set_led_mode(call):
"""Set the OpenTherm Gateway LED modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
led_id = call.data[ATTR_ID]
led_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_led_mode(led_id, led_mode)
led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
gw_dev.status.update({led_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, service_set_led_mode_schema
)
async def set_max_mod(call):
"""Set the max modulation level."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD
level = call.data[ATTR_LEVEL]
if level == -1:
# Backend only clears setting on non-numeric values.
level = "-"
value = await gw_dev.gateway.set_max_relative_mod(level)
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema
)
async def set_outside_temp(call):
"""Provide the outside temperature to the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_OUTSIDE_TEMP
value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema
)
async def set_setback_temp(call):
"""Set the OpenTherm Gateway SetBack temperature."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_SB_TEMP
value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema
)
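# Example service call handled by register_services(). The service and data
# keys come from the SERVICE_* and ATTR_* constants in .const; the strings
# below are the commonly documented values and are shown for illustration:
#
#     service: opentherm_gw.set_clock
#     data:
#       gateway_id: living_room
#       date: "2022-09-15"
#       time: "12:00:00"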
async def async_unload_entry(hass, entry):
"""Cleanup and disconnect from gateway."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
await gateway.cleanup()
return unload_ok
class OpenThermGatewayDevice:
"""OpenTherm Gateway device class."""
def __init__(self, hass, config_entry):
"""Initialize the OpenTherm Gateway."""
self.hass = hass
self.device_path = config_entry.data[CONF_DEVICE]
self.gw_id = config_entry.data[CONF_ID]
self.name = config_entry.data[CONF_NAME]
self.climate_config = config_entry.options
self.config_entry_id = config_entry.entry_id
self.status = {}
self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update"
self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update"
self.gateway = pyotgw.pyotgw()
self.gw_version = None
async def cleanup(self, event=None):
"""Reset overrides on the gateway."""
await self.gateway.set_control_setpoint(0)
await self.gateway.set_max_relative_mod("-")
await self.gateway.disconnect()
async def connect_and_subscribe(self):
"""Connect to serial device and subscribe report handler."""
self.status = await self.gateway.connect(self.hass.loop, self.device_path)
version_string = self.status[gw_vars.OTGW].get(gw_vars.OTGW_ABOUT)
self.gw_version = version_string[18:] if version_string else None
_LOGGER.debug(
"Connected to OpenTherm Gateway %s at %s", self.gw_version, self.device_path
)
dev_reg = await async_get_dev_reg(self.hass)
gw_dev = dev_reg.async_get_or_create(
config_entry_id=self.config_entry_id,
identifiers={(DOMAIN, self.gw_id)},
name=self.name,
manufacturer="Schelte Bron",
model="OpenTherm Gateway",
sw_version=self.gw_version,
)
if gw_dev.sw_version != self.gw_version:
dev_reg.async_update_device(gw_dev.id, sw_version=self.gw_version)
self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup)
async def handle_report(status):
"""Handle reports from the OpenTherm Gateway."""
_LOGGER.debug("Received report: %s", status)
self.status = status
async_dispatcher_send(self.hass, self.update_signal, status)
self.gateway.subscribe(handle_report)
|
|
""":mod:`sqlalchemy_imageattach.stores.s3` --- AWS S3_ backend storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The backend storage implementation for `Simple Storage Service <S3>`_
provided by `Amazon Web Services <AWS>`_.
.. _S3: http://aws.amazon.com/s3/
.. _AWS: http://aws.amazon.com/
"""
import base64
import calendar
import datetime
import email.utils
import hashlib
import hmac
import logging
try:
from urllib import request as urllib2
except ImportError:
import urllib2
from ..store import Store
from .fs import guess_extension
__all__ = ('BASE_URL_FORMAT', 'DEFAULT_MAX_AGE',
'S3Request', 'S3SandboxStore', 'S3Store')
#: (:class:`numbers.Integral`) The default ``max-age`` seconds of
#: :mailheader:`Cache-Control`. It's the default value of
#: :attr:`S3Store.max_age` attribute.
DEFAULT_MAX_AGE = 60 * 60 * 24 * 365
#: (:class:`str`) The format string of base url of AWS S3.
#: Contains no trailing slash.
#: Default is ``'https://{0}.s3.amazonaws.com'``.
BASE_URL_FORMAT = 'https://{0}.s3.amazonaws.com'
class S3Request(urllib2.Request):
"""HTTP request for S3 REST API which does authentication."""
logger = logging.getLogger(__name__ + '.S3Request')
def __init__(self, url, bucket, access_key, secret_key,
data=None, headers={}, method=None, content_type=None):
urllib2.Request.__init__(self, url, data=data, headers=headers)
self.bucket = bucket
self.access_key = access_key
self.secret_key = secret_key
self.method = method
if self.data is None:
self.content_md5 = ''
self.content_type = ''
else:
assert content_type
self.content_md5 = base64.b64encode(
hashlib.md5(self.data).digest()
).decode('ascii')
self.content_type = content_type
self.add_header('Content-md5', self.content_md5)
self.add_header('Content-type', content_type)
self.add_header('Content-length', len(self.data))
self.date = email.utils.formatdate(
calendar.timegm(datetime.datetime.utcnow().timetuple()),
usegmt=True
)
self.add_header('Date', self.date)
authorization = self.get_authorization()
self.logger.debug('get_authorization() = %r', authorization)
self.add_header('Authorization', authorization)
def get_method(self):
return self.method or urllib2.Request.get_method(self) or 'GET'
def get_path_with_query(self):
url = self.get_full_url()
return url[url.index('/', 8):]
def get_authorization(self):
return 'AWS {0}:{1}'.format(
self.access_key,
self.get_signature().decode('utf-8')
)
def get_signature(self):
sign = self.sign()
self.logger.debug('sign() = %r', sign)
d = hmac.new(
self.secret_key.encode('utf-8'),
sign.encode('utf-8'),
hashlib.sha1
)
return base64.b64encode(d.digest())
def sign(self):
return '\n'.join([
self.get_method().upper(),
self.content_md5,
self.content_type,
self.date,
self.canonicalize_headers() + self.canonicalize_resource()
])
def canonicalize_headers(self):
pairs = [(k.lower(), v)
for k, v in self.header_items()
if k.lower().startswith('x-amz-')]
pairs.sort(key=lambda pair: pair[0])
line = '{0}:{1}\n'.format
return ''.join(line(k, v) for k, v in pairs)
def canonicalize_resource(self):
# FIXME: query should be lexicographically sorted if multiple
return '/' + self.bucket + self.get_path_with_query()
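# Worked example of the string-to-sign built by sign() for an upload request
# (every value below is illustrative):
#
#     PUT
#     1B2M2Y8AsgTpgAmY7PhCfg==
#     image/jpeg
#     Tue, 15 Nov 2022 08:12:31 GMT
#     x-amz-acl:public-read
#     x-amz-storage-class:STANDARD
#     /my-bucket/images/user/123/100x100.jpeg
#
# The HMAC-SHA1 of that string, keyed with the secret key and base64-encoded,
# becomes the signature in the ``Authorization: AWS <access_key>:<signature>``
# header (the legacy AWS signature version 2 scheme).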
class S3Store(Store):
"""Image storage backend implementation using S3_. It implements
:class:`~sqlalchemy_imageattach.store.Store` interface.
If you'd like to use it with Amazon CloudFront_, pass the base url of
the distribution to ``public_base_url``. Note that you should configure
*Forward Query Strings* to *Yes* when you create the distribution,
because SQLAlchemy-ImageAttach adds query strings to public URLs
to invalidate caches when an image is updated.
:param bucket: the bucket name
:type bucket: :class:`basestring`
:param access_key: the AWS access key for the bucket
:type access_key: :class:`basestring`
:param secret_key: the AWS secret key for the bucket
:type secret_key: :class:`basestring`
:param max_age: the ``max-age`` seconds of :mailheader:`Cache-Control`.
default is :const:`DEFAULT_MAX_AGE`
:type max_age: :class:`numbers.Integral`
:param prefix: the optional key prefix to logically separate stores
with the same bucket. not used by default
:type prefix: :class:`basestring`
:param public_base_url: an optional url base for public urls.
useful when used with cdn
:type public_base_url: :class:`basestring`
.. versionchanged:: 0.8.1
Added ``public_base_url`` parameter.
.. _CloudFront: http://aws.amazon.com/cloudfront/
"""
logger = logging.getLogger(__name__ + '.S3Store')
#: (:class:`basestring`) The S3 bucket name.
bucket = None
#: (:class:`numbers.Integral`) The ``max-age`` seconds of
#: :mailheader:`Cache-Control`.
max_age = None
#: (:class:`basestring`) The optional key prefix to logically separate
#: stores with the same bucket.
prefix = None
#: (:class:`basestring`) The optional url base for public urls.
public_base_url = None
def __init__(self, bucket, access_key=None, secret_key=None,
max_age=DEFAULT_MAX_AGE, prefix='', public_base_url=None):
self.bucket = bucket
self.access_key = access_key
self.secret_key = secret_key
self.base_url = BASE_URL_FORMAT.format(bucket)
self.max_age = max_age
self.prefix = prefix.strip()
if self.prefix.endswith('/'):
self.prefix = self.prefix.rstrip('/')
if public_base_url is None:
self.public_base_url = self.base_url
elif public_base_url.endswith('/'):
self.public_base_url = public_base_url.rstrip('/')
else:
self.public_base_url = public_base_url
def get_key(self, object_type, object_id, width, height, mimetype):
key = '{0}/{1}/{2}x{3}{4}'.format(
object_type, object_id, width, height,
guess_extension(mimetype)
)
if self.prefix:
return '{0}/{1}'.format(self.prefix, key)
return key
def get_file(self, *args, **kwargs):
url = self.get_s3_url(*args, **kwargs)
request = self.make_request(url)
return urllib2.urlopen(request)
def get_s3_url(self, *args, **kwargs):
return '{0}/{1}'.format(
self.base_url,
self.get_key(*args, **kwargs)
)
def get_url(self, *args, **kwargs):
return '{0}/{1}'.format(
self.public_base_url,
self.get_key(*args, **kwargs)
)
def make_request(self, url, *args, **kwargs):
return S3Request(url, *args,
bucket=self.bucket,
access_key=self.access_key,
secret_key=self.secret_key,
**kwargs)
def upload_file(self, url, data, content_type, rrs, acl='public-read'):
headers = {
'Cache-Control': 'max-age=' + str(self.max_age),
'x-amz-acl': acl,
'x-amz-storage-class': 'REDUCED_REDUNDANCY' if rrs else 'STANDARD'
}
request = self.make_request(
url,
method='PUT',
data=data,
content_type=content_type,
headers=headers
)
while 1:
try:
urllib2.urlopen(request).read()
except urllib2.HTTPError as e:
if 400 <= e.code < 500:
self.logger.exception(e)
self.logger.debug(e.read())
raise
self.logger.debug(e)
continue
except IOError as e:
self.logger.debug(e)
continue
else:
break
def put_file(self, file, object_type, object_id, width, height, mimetype,
reproducible):
url = self.get_s3_url(object_type, object_id, width, height, mimetype)
self.upload_file(url, file.read(), mimetype, rrs=reproducible)
def delete_file(self, *args, **kwargs):
url = self.get_s3_url(*args, **kwargs)
request = self.make_request(url, method='DELETE')
urllib2.urlopen(request).read()
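# A minimal usage sketch (the bucket, keys and CloudFront URL are
# placeholders; ``store_context`` is provided by
# :mod:`sqlalchemy_imageattach.context`, and ``user.picture`` stands in for
# any image set attribute):
#
#     from sqlalchemy_imageattach.context import store_context
#
#     store = S3Store('my-image-bucket',
#                     access_key='AKIA...', secret_key='...',
#                     prefix='images',
#                     public_base_url='https://dxxxxxxxx.cloudfront.net')
#     with store_context(store):
#         with open('portrait.jpg', 'rb') as f:
#             user.picture.from_file(f)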
class S3SandboxStore(Store):
"""It stores images into physically two separated S3 buckets while
these look like logically exist in the same store. It takes two buckets
for *read-only* and *overwrite*: ``underlying`` and ``overriding``.
It's useful for development/testing purpose, because you can use
the production store in sandbox.
:param underlying: the name of *underlying* bucket for read-only
:type underlying: :class:`basestring`
:param overriding: the name of *overriding* bucket to record
overriding modifications
:type overriding: :class:`basestring`
:param access_key: the AWS access key for the buckets
:type access_key: :class:`basestring`
:param secret_key: the AWS secret key for the buckets
:type secret_key: :class:`basestring`
:param max_age: the ``max-age`` seconds of :mailheader:`Cache-Control`.
default is :const:`DEFAULT_MAX_AGE`
:type max_age: :class:`numbers.Integral`
:param overriding_prefix: means the same to :attr:`S3Store.prefix` but
it's only applied for ``overriding``
:type overriding_prefix: :class:`basestring`
:param underlying_prefix: means the same to :attr:`S3Store.prefix` but
it's only applied for ``underlying``
:type underlying_prefix: :class:`basestring`
"""
logger = logging.getLogger(__name__ + '.S3SandboxStore')
#: All keys marked as "deleted" have this mimetype as
#: its :mailheader:`Content-Type` header.
DELETED_MARK_MIMETYPE = \
'application/x-sqlalchemy-imageattach-sandbox-deleted'
#: (:class:`S3Store`) The *underlying* store for read-only.
underlying = None
#: (:class:`S3Store`) The *overriding* store to record overriding
#: modification.
overriding = None
def __init__(self, underlying, overriding,
access_key=None, secret_key=None, max_age=DEFAULT_MAX_AGE,
underlying_prefix='', overriding_prefix=''):
self.underlying = S3Store(underlying,
access_key=access_key, secret_key=secret_key,
max_age=max_age, prefix=underlying_prefix)
self.overriding = S3Store(overriding,
access_key=access_key, secret_key=secret_key,
max_age=max_age, prefix=overriding_prefix)
def get_file(self, *args, **kwargs):
try:
file_ = self.overriding.get_file(*args, **kwargs)
except IOError:
return self.underlying.get_file(*args, **kwargs)
if file_.info().get('Content-Type') == self.DELETED_MARK_MIMETYPE:
raise IOError('deleted')
return file_
def get_url(self, *args, **kwargs):
request = self.overriding.make_request(
self.overriding.get_url(*args, **kwargs),
method='HEAD'
)
store = self.overriding
try:
urllib2.urlopen(request)
except urllib2.HTTPError as e:
if e.code == 404:
store = self.underlying
return store.get_url(*args, **kwargs)
def put_file(self, *args, **kwargs):
self.overriding.put_file(*args, **kwargs)
def delete_file(self, object_type, object_id, width, height, mimetype):
args = object_type, object_id, width, height, mimetype
self.overriding.delete_file(*args)
url = self.overriding.get_s3_url(*args)
self.overriding.upload_file(
url,
data=b'',
content_type=self.DELETED_MARK_MIMETYPE,
rrs=True,
acl='private'
)
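# Sandbox sketch: read from a production bucket, write only to a scratch
# bucket (bucket names and credentials are placeholders):
#
#     sandbox = S3SandboxStore('prod-images', 'dev-images-scratch',
#                              access_key='AKIA...', secret_key='...',
#                              underlying_prefix='images',
#                              overriding_prefix='images')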
|
|
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
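#
# A quick usage sketch (nothing below is specific to this package beyond the
# documented Process API):
#
#     from multiprocessing import Process
#
#     def greet(name):
#         print 'hello,', name
#
#     if __name__ == '__main__':
#         p = Process(target=greet, args=('world',))
#         p.start()
#         p.join()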
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk ([email protected])'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
# This is down here because _multiprocessing uses BufferTooShort
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
def Pipe(duplex=True):
'''
Returns two connection objects connected by a pipe
'''
from multiprocessing.connection import Pipe
return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from external.wip import work_in_progress
from rmgpy.molecule.adjlist import InvalidAdjacencyListError
from rmgpy.molecule.molecule import Molecule
from rmgpy.molecule.group import Group
import logging
logging.basicConfig(level=logging.DEBUG)
################################################################################
class TestGroupAdjLists(unittest.TestCase):
"""
Contains adjacency list unit tests of the Graph class.
"""
def setUp(self):
pass
def testFromOldAdjacencyList1(self):
"""
adjlist: Test the Group.fromAdjacencyList() method on an old style adjacency list.
"""
adjlist = """
1 *2 {Cs,Cd} 0 {2,{S,D}} {3,S}
2 *1 {Os,Od} 0 {1,{S,D}}
3 R!H {0,1} {1,S}
"""
group = Group().fromAdjacencyList(adjlist)
atom1, atom2, atom3 = group.atoms
self.assertTrue(group.hasBond(atom1, atom2))
self.assertTrue(group.hasBond(atom1, atom3))
self.assertFalse(group.hasBond(atom2, atom3))
bond12 = atom1.bonds[atom2]
bond13 = atom1.bonds[atom3]
self.assertTrue(atom1.label == '*2')
self.assertTrue(atom1.atomType[0].label in ['Cs', 'Cd'])
self.assertTrue(atom1.atomType[1].label in ['Cs', 'Cd'])
self.assertTrue(atom1.radicalElectrons == [0])
self.assertTrue(atom2.label == '*1')
self.assertTrue(atom2.atomType[0].label in ['Os', 'Od'])
self.assertTrue(atom2.atomType[1].label in ['Os', 'Od'])
self.assertTrue(atom2.radicalElectrons == [0])
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.atomType[0].label == 'R!H')
self.assertTrue(atom3.radicalElectrons == [0, 1])
self.assertTrue(bond12.order == [1,2])
self.assertTrue(bond13.isSingle())
def testFromAdjacencyList(self):
"""
adjlist: Test the Group.fromAdjacencyList() method.
"""
adjlist = """
1 *2 [Cs,Cd] u0 {2,[S,D]} {3,S}
2 *1 [Os,Od] u0 {1,[S,D]}
3 R!H u0 {1,S}
"""
group = Group().fromAdjacencyList(adjlist)
atom1, atom2, atom3 = group.atoms
self.assertTrue(group.hasBond(atom1, atom2))
self.assertTrue(group.hasBond(atom1, atom3))
self.assertFalse(group.hasBond(atom2, atom3))
bond12 = atom1.bonds[atom2]
bond13 = atom1.bonds[atom3]
self.assertTrue(atom1.label == '*2')
self.assertTrue(atom1.atomType[0].label in ['Cs', 'Cd'])
self.assertTrue(atom1.atomType[1].label in ['Cs', 'Cd'])
self.assertTrue(atom1.radicalElectrons == [0])
self.assertTrue(atom2.label == '*1')
self.assertTrue(atom2.atomType[0].label in ['Os', 'Od'])
self.assertTrue(atom2.atomType[1].label in ['Os', 'Od'])
self.assertTrue(atom2.radicalElectrons == [0])
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.atomType[0].label == 'R!H')
self.assertTrue(atom3.radicalElectrons == [0])
self.assertTrue(bond12.order == [1, 2])
self.assertTrue(bond13.isSingle())
def testFromAdjacencyList_multiplicity(self):
gp = Group().fromAdjacencyList(
"""
multiplicity [1]
1 C u0 p0 c0
"""
)
self.assertEqual(len(gp.multiplicity), 1)
self.assertEqual(gp.multiplicity[0], 1)
def testFromAdjacencyList_multiplicity_list(self):
gp = Group().fromAdjacencyList(
"""
multiplicity [ 1, 3, 5 ]
1 C u0 p0 c0
"""
)
self.assertEqual(len(gp.multiplicity), 3)
self.assertEqual(gp.multiplicity[0], 1)
self.assertEqual(gp.multiplicity[1], 3)
self.assertEqual(gp.multiplicity[2], 5)
def testToAdjacencyList(self):
"""
adjlist: Test the Group.toAdjacencyList() method.
"""
adjlist = """
1 *2 [Cs,Cd] u0 {2,[S,D]} {3,S}
2 *1 [Os,Od] u0 {1,[S,D]}
3 R!H u0 {1,S}
"""
group = Group().fromAdjacencyList(adjlist)
adjlist2 = group.toAdjacencyList()
self.assertEqual(adjlist.strip(), adjlist2.strip())
class TestMoleculeAdjLists(unittest.TestCase):
"""
adjlist: Contains adjacency list unit tests of the Molecule class.
"""
def setUp(self):
pass
def testFromAdjacencyList1(self):
"""
adjlist: Test the Molecule.fromAdjacencyList() method 1.
"""
# molecule 1
adjlist = """
1 *1 C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 c0 {4,D}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
self.assertTrue(molecule.multiplicity == 2)
atom1 = molecule.atoms[0]
atom2 = molecule.atoms[3]
atom3 = molecule.atoms[4]
atom4 = molecule.atoms[5]
self.assertTrue(molecule.hasBond(atom2, atom1))
self.assertTrue(molecule.hasBond(atom2, atom3))
self.assertTrue(molecule.hasBond(atom2, atom4))
self.assertFalse(molecule.hasBond(atom1, atom3))
self.assertFalse(molecule.hasBond(atom1, atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
def testFromAdjacencyList2(self):
"""
adjlist: Test the Molecule.fromAdjacencyList() method 2.
"""
# molecule 2
adjlist = """
1 *1 C u1 {2,S} {3,S} {4,S}
2 H u0 {1,S}
3 H u0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 {4,D}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
self.assertTrue(molecule.multiplicity == 2)
atom1 = molecule.atoms[0]
atom2 = molecule.atoms[3]
atom3 = molecule.atoms[4]
atom4 = molecule.atoms[5]
self.assertTrue(molecule.hasBond(atom2, atom1))
self.assertTrue(molecule.hasBond(atom2, atom3))
self.assertTrue(molecule.hasBond(atom2, atom4))
self.assertFalse(molecule.hasBond(atom1, atom3))
self.assertFalse(molecule.hasBond(atom1, atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
def testFromAdjacencyList3(self):
"""
adjlist: Test the Molecule.fromAdjacencyList() method 3.
"""
# molecule 3
adjlist = """
1 *1 C u1 {2,S} {3,S} {4,S}
2 H u0 {1,S}
3 H u0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 {4,D}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
self.assertTrue(molecule.multiplicity == 2)
atom1 = molecule.atoms[0]
atom2 = molecule.atoms[3]
atom3 = molecule.atoms[4]
atom4 = molecule.atoms[5]
self.assertTrue(molecule.hasBond(atom2, atom1))
self.assertTrue(molecule.hasBond(atom2, atom3))
self.assertTrue(molecule.hasBond(atom2, atom4))
self.assertFalse(molecule.hasBond(atom1, atom3))
self.assertFalse(molecule.hasBond(atom1, atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
def testFromAdjacencyList4(self):
"""
adjlist: Test the Molecule.fromAdjacencyList() method 4.
"""
# molecule 4
adjlist = """
1 *1 C u1 {2,S}
2 *2 N u0 p0 c+1 {1,S} {3,S} {4,D}
3 O u0 p3 c-1 {2,S}
4 O u0 p2 {2,D}
"""
molecule = Molecule().fromAdjacencyList(adjlist, saturateH=True)
self.assertTrue(molecule.multiplicity == 2)
atom1 = molecule.atoms[0]
atom2 = molecule.atoms[1]
atom3 = molecule.atoms[2]
atom4 = molecule.atoms[3]
self.assertTrue(molecule.hasBond(atom2, atom1))
self.assertTrue(molecule.hasBond(atom2, atom3))
self.assertTrue(molecule.hasBond(atom2, atom4))
self.assertFalse(molecule.hasBond(atom1, atom3))
self.assertFalse(molecule.hasBond(atom1, atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
def testFromAdjacencyList5(self):
"""
adjlist: Test if fromAdjacencyList works when saturateH is turned on
and test molecule is fused aromatics.
"""
# molecule 5
adjlist = """
1 * C u0 p0 c0 {2,B} {3,B} {4,B}
2 C u0 p0 c0 {1,B} {5,B} {6,B}
3 C u0 p0 c0 {1,B} {8,B} {13,S}
4 C u0 p0 c0 {1,B} {9,B}
5 C u0 p0 c0 {2,B} {10,B}
6 C u0 p0 c0 {2,B} {7,B}
7 C u0 p0 c0 {6,B} {8,B} {11,S}
8 C u0 p0 c0 {3,B} {7,B} {12,S}
9 C u0 p0 c0 {4,B} {10,B}
10 C u0 p0 c0 {5,B} {9,B}
11 H u0 p0 c0 {7,S}
12 H u0 p0 c0 {8,S}
13 H u0 p0 c0 {3,S}
"""
molecule = Molecule().fromAdjacencyList(adjlist, saturateH=True)
self.assertTrue(molecule.multiplicity == 1)
atom1 = molecule.atoms[0]
atom2 = molecule.atoms[1]
atom3 = molecule.atoms[2]
atom7 = molecule.atoms[6]
atom11 = molecule.atoms[10]
bond21 = atom2.bonds[atom1]
bond13 = atom1.bonds[atom3]
bond7_11 = atom7.bonds[atom11]
self.assertTrue(atom1.label == '*')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 0)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '')
self.assertTrue(atom2.element.symbol == 'C')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 0)
self.assertTrue(bond21.isBenzene())
self.assertTrue(bond13.isBenzene())
self.assertTrue(bond7_11.isSingle())
def testVariousSpinAdjlists(self):
"""
adjlist: Test that molecules with old or intermediate adjacency list formats containing unusual
spin states can get converted to the proper new adjlist format.
"""
adjlist_2S = """
1 C 2S 0 {2,S} {3,S}
2 H 0 0 {1,S}
3 H 0 0 {1,S}
"""
adjlist_2S_new ="""
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
mol_2S = Molecule().fromAdjacencyList(adjlist_2S)
mol_2S_new = Molecule().fromAdjacencyList(adjlist_2S_new)
self.assertTrue(mol_2S.isIsomorphic(mol_2S_new))
adjlist_2T = """
1 C 2T 0 {2,S} {3,S}
2 H 0 0 {1,S}
3 H 0 0 {1,S}
"""
adjlist_2T_new ="""
1 C u2 p0 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
mol_2T = Molecule().fromAdjacencyList(adjlist_2T)
mol_2T_new = Molecule().fromAdjacencyList(adjlist_2T_new)
self.assertTrue(mol_2T.isIsomorphic(mol_2T_new))
adjlist_3D = """
1 C 3D 0 {2,S}
2 H 0 0 {1,S}
"""
adjlist_3D_new = """
1 C u1 p1 c0 {2,S}
2 H u0 p0 c0 {1,S}
"""
mol_3D = Molecule().fromAdjacencyList(adjlist_3D)
mol_3D_new = Molecule().fromAdjacencyList(adjlist_3D_new)
self.assertTrue(mol_3D.isIsomorphic(mol_3D_new))
adjlist_3Q = """
1 N 3Q 1
"""
adjlist_3Q_new = """
1 N u3 p1 c0
"""
mol_3Q = Molecule().fromAdjacencyList(adjlist_3Q)
mol_3Q_new = Molecule().fromAdjacencyList(adjlist_3Q_new)
self.assertTrue(mol_3Q.isIsomorphic(mol_3Q_new))
adjlist_4S = """
1 C 4S 0
"""
adjlist_4S_new = """
1 C u0 p2 c0
"""
mol_4S = Molecule().fromAdjacencyList(adjlist_4S)
mol_4S_new = Molecule().fromAdjacencyList(adjlist_4S_new)
self.assertTrue(mol_4S.isIsomorphic(mol_4S_new))
adjlist_4T = """
1 C 4T 0
"""
adjlist_4T_new = """
1 C u2 p1 c0
"""
mol_4T = Molecule().fromAdjacencyList(adjlist_4T)
mol_4T_new = Molecule().fromAdjacencyList(adjlist_4T_new)
self.assertTrue(mol_4T.isIsomorphic(mol_4T_new))
adjlist_4V = """
1 C 4V 0
"""
adjlist_4V_new ="""
1 C u4 p0 c0
"""
mol_4V = Molecule().fromAdjacencyList(adjlist_4V)
mol_4V_new = Molecule().fromAdjacencyList(adjlist_4V_new)
self.assertTrue(mol_4V.isIsomorphic(mol_4V_new))
def testWildcardAdjlists(self):
"""
adjlist: Test that molecule adjlists containing wildcards raise an InvalidAdjacencyListError.
"""
# A molecule with a wildcard assignment
wildcardAdjlist1 = "1 C u1 px c0"
wildcardAdjlist2 = "1 C ux p2 c0"
wildcardAdjlist3 = "1 C u1 p2 cx"
wildcardAdjlist4 = "1 [C,N] u1 p2 c0"
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList(wildcardAdjlist1)
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList(wildcardAdjlist2)
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList(wildcardAdjlist3)
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList(wildcardAdjlist4)
def testIncorrectAdjlists(self):
"""
adjlist: Test that improperly formed adjlists raise an InvalidAdjacencyListError.
"""
# Carbon with 1 radical and 3 lone pairs = 7 total electrons. Should have -3 charge but doesn't
adjlist1 = "1 C u1 p3 c0"
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList(adjlist1)
def testHelium(self):
"""
adjlist: Test that the adjlist reading and writing works with Helium.
"""
smiles = '[He]'
inchi = 'InChI=1S/He'
adjlist = '1 He u0 p1 c0'
adjlist_old = '1 He 0'
adjlist_intermediate = '1 He 0 1'
mol_smiles = Molecule().fromSMILES(smiles)
mol_inchi = Molecule().fromInChI(inchi)
mol = Molecule().fromAdjacencyList(adjlist)
mol_old = Molecule().fromAdjacencyList(adjlist_old)
mol_intermediate = Molecule().fromAdjacencyList(adjlist_intermediate)
# Isomorphic check
self.assertTrue(mol_smiles.isIsomorphic(mol))
self.assertTrue(mol_smiles.isIsomorphic(mol_inchi))
self.assertTrue(mol_smiles.isIsomorphic(mol_old))
self.assertTrue(mol_smiles.isIsomorphic(mol_intermediate))
# Adjlist check
self.assertEqual(mol_smiles.toAdjacencyList().strip(), adjlist)
self.assertEqual(mol_inchi.toAdjacencyList().strip(), adjlist)
self.assertEqual(mol.toAdjacencyList().strip(), adjlist)
self.assertEqual(mol_old.toAdjacencyList().strip(), adjlist)
self.assertEqual(mol_intermediate.toAdjacencyList().strip(), adjlist)
self.assertEqual(mol.toSMILES(),smiles)
self.assertEqual(mol.toInChI(),'InChI=1S/He')
def testToAdjacencyList(self):
"""
adjlist: Test the Molecule.toAdjacencyList() method.
"""
inter_adjlist = """
1 *1 C 1 0 {2,S} {3,S} {4,S}
2 H 0 0 {1,S}
3 H 0 0 {1,S}
4 *2 N 0 0 {1,S} {5,S} {6,D}
5 O 0 3 {4,S}
6 O 0 2 {4,D}
"""
adjlist = """
1 *1 C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 c0 {4,D}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule2 = Molecule().fromAdjacencyList(inter_adjlist)
adjlist_1 = molecule.toAdjacencyList(removeH=False)
self.assertEqual(adjlist_1,molecule2.toAdjacencyList())
newMolecule = Molecule().fromAdjacencyList(adjlist_1)
self.assertTrue(molecule.isIsomorphic(newMolecule))
def testToAdjacencyListForNonIntegerBonds(self):
"""
Test the adjacency list can be created for molecules with bond orders
that don't fit into single, double, triple, or benzene
"""
from rmgpy.molecule.molecule import Atom, Bond, Molecule
atom1 = Atom(element='H',lonePairs=0)
atom2 = Atom(element='H',lonePairs=0)
bond = Bond(atom1, atom2, 0.5)
mol = Molecule(multiplicity=1)
mol.addAtom(atom1)
mol.addAtom(atom2)
mol.addBond(bond)
adjlist = mol.toAdjacencyList()
self.assertIn('H', adjlist)
self.assertIn('{1,0.5}',adjlist)
@work_in_progress
def testFromAdjacencyListForNonIntegerBonds(self):
"""
Test molecule can be created from the adjacency list for molecules with bond orders
that don't fit into single, double, triple, or benzene.
This test is a work in progress since currently reading one of these
objects throws an `InvalidAdjacencyListError`. Since the number of radical
electrons is an integer, having fractional bonds leads to this error.
Fixing it would require switching radical electrons to floats.
"""
from rmgpy.molecule.molecule import Molecule
adjlist = """
1 H u1 p2 c0 {2,0.5}
2 H u1 p2 c0 {1,0.5}
"""
mol = Molecule().fromAdjacencyList(adjlist)
atom0 = mol.atoms[0]
atoms, bonds = zip(*atom0.bonds.items())
self.assertAlmostEqual(bonds[0].getOrderNum(), 0.5)
def testFromIntermediateAdjacencyList1(self):
"""
Test we can read an intermediate style adjacency list with implicit hydrogens 1
"""
adjList = """
1 O 0 2
""" # should be Water
molecule = Molecule().fromAdjacencyList(adjList, saturateH=True)
self.assertEqual(molecule.getFormula(), 'H2O')
def testFromOldAdjacencyList1(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 1
"""
adjList = """
1 O 0
""" # should be Water
molecule = Molecule().fromAdjacencyList(adjList)
self.assertEqual(molecule.getFormula(), 'H2O')
def testFromOldAdjacencyList2(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 2
"""
adjlist = """
1 C 2S
"""
adjlist_new = """
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule_new = Molecule().fromAdjacencyList(adjlist_new)
self.assertTrue(molecule.isIsomorphic(molecule_new))
def testFromOldAdjacencyList3(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 3
"""
adjlist = """
1 C 0
"""
adjlist_new = """
1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule_new = Molecule().fromAdjacencyList(adjlist_new)
self.assertTrue(molecule.isIsomorphic(molecule_new))
def testFromOldAdjacencyList4(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 4
"""
adjlist = """
1 O 2S
"""
adjlist_new = """
1 O u0 p3 c0
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule_new = Molecule().fromAdjacencyList(adjlist_new)
self.assertTrue(molecule.isIsomorphic(molecule_new))
@work_in_progress
def testFromOldAdjacencyList5(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 5
"""
adjlist = """
1 C 2S {2,T}
2 O 2S {1,T}
"""
adjlist_new = """
1 C u0 p1 c-1 {2,T}
2 O u0 p1 c+1 {1,T}
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule_new = Molecule().fromAdjacencyList(adjlist_new)
self.assertTrue(molecule.isIsomorphic(molecule_new))
# Currently the fromOldAdjacencyList cannot correctly interpret CO written in this old form
# (I don't think any adjlists are actually formed this way.)
# Currently 'adjlist' will fail when the Molecule is determined to be non-neutral in net charge.
def testFromOldAdjacencyList6(self):
"""
Test we can read an old style adjacency list with implicit hydrogens 6
"""
adjlist = """
1 C 4T
"""
adjlist_new = """
1 C u2 p1 c0
"""
molecule = Molecule().fromAdjacencyList(adjlist)
molecule_new = Molecule().fromAdjacencyList(adjlist_new)
self.assertTrue(molecule.isIsomorphic(molecule_new))
def testAdjacencyList(self):
"""
adjlist: Check the adjacency list read/write functions for a full molecule.
"""
molecule1 = Molecule().fromAdjacencyList("""
1 C u0 {2,D} {7,S} {8,S}
2 C u0 {1,D} {3,S} {9,S}
3 C u0 {2,S} {4,D} {10,S}
4 C u0 {3,D} {5,S} {11,S}
5 C u1 {4,S} {6,S} {12,S}
6 C u0 {5,S} {13,S} {14,S} {15,S}
7 H u0 {1,S}
8 H u0 {1,S}
9 H u0 {2,S}
10 H u0 {3,S}
11 H u0 {4,S}
12 H u0 {5,S}
13 H u0 {6,S}
14 H u0 {6,S}
15 H u0 {6,S}
""")
molecule2 = Molecule().fromSMILES('C=CC=C[CH]C')
self.assertTrue(molecule1.isIsomorphic(molecule2))
self.assertTrue(molecule2.isIsomorphic(molecule1))
#Test that charges are correctly stored and written with adjacency lists
adjlist3 = """
1 C u0 p1 c-1 {2,T}
2 O u0 p1 c+1 {1,T}
"""
molecule3 = Molecule().fromAdjacencyList(adjlist3)
self.assertEquals(molecule3.atoms[0].charge, -1)
self.assertEquals(molecule3.atoms[1].charge, 1)
adjlist4 = molecule3.toAdjacencyList()
self.assertEquals(adjlist3.strip(), adjlist4.strip())
def testGroupAdjacencyList(self):
"""
adjlist: Check the adjacency list read/write functions for a full molecule.
"""
adjlist = """1 C u0 {2,D}
2 O u1 p1 c[-1,0,+1] {1,D}
"""
group = Group().fromAdjacencyList("""
1 C u0 {2,D}
2 O u1 p1 c[-1,0,+1] {1,D}
""")
self.assertEqual(adjlist, group.toAdjacencyList())
def testToOldAjacencyList(self):
"""
adjlist: Check that we can convert back to old style adjacency list
"""
molecule2 = Molecule().fromSMILES('C=CC=C[CH]C')
string = """1 C 0 {2,D}
2 C 0 {1,D} {3,S}
3 C 0 {2,S} {4,D}
4 C 0 {3,D} {5,S}
5 C 1 {4,S} {6,S}
6 C 0 {5,S}"""
self.assertEqual(molecule2.toAdjacencyList(removeH=True,oldStyle=True).strip(),string.strip())
################################################################################
class TestConsistencyChecker(unittest.TestCase):
def test_check_hund_rule_fail(self):
with self.assertRaises(InvalidAdjacencyListError):
Molecule().fromAdjacencyList("""
multiplicity 1
1 C u2 p0 c0
""", saturateH=True)
def test_check_hund_rule_success(self):
try:
Molecule().fromAdjacencyList("""
multiplicity 3
1 C u2 p0 c0
""", saturateH=True)
except InvalidAdjacencyListError:
self.fail('InvalidAdjacencyListError thrown unexpectedly!')
def test_check_multiplicity(self):
"""
adjlist: Check that RMG allows different electron spins in the same molecule with multiplicity = 2s + 1
"""
# [N] radical:
try:
Molecule().fromAdjacencyList('''multiplicity 4
1 N u3 p1 c0''')
except InvalidAdjacencyListError:
self.fail('InvalidAdjacencyListError thrown unexpectedly for N tri-rad!')
# A general molecule with 4 radicals, multiplicity 5:
try:
Molecule().fromAdjacencyList('''multiplicity 5
1 O u1 p2 c0 {2,S}
2 C u1 p0 c0 {1,S} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 N u1 p1 c0 {2,S} {5,S}
5 O u1 p2 c0 {4,S}''')
except InvalidAdjacencyListError:
self.fail('InvalidAdjacencyListError thrown unexpectedly for a molecule with 4 radicals, multiplicity 5')
# A general molecule with 4 radicals, multiplicity 3:
try:
Molecule().fromAdjacencyList('''multiplicity 3
1 O u1 p2 c0 {2,S}
2 C u1 p0 c0 {1,S} {3,S} {4,S}
3 H u0 p0 c0 {2,S}
4 N u1 p1 c0 {2,S} {5,S}
5 O u1 p2 c0 {4,S}''')
except InvalidAdjacencyListError:
self.fail('InvalidAdjacencyListError thrown unexpectedly for a molecule with 4 radicals, multiplicity 3')
# [N]=C=[N] singlet:
try:
Molecule().fromAdjacencyList('''multiplicity 1
1 N u1 p1 c0 {2,D}
2 C u0 p0 c0 {1,D} {3,D}
3 N u1 p1 c0 {2,D}''')
except InvalidAdjacencyListError:
self.fail('InvalidAdjacencyListError thrown unexpectedly for singlet [N]=C=[N]!')
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=3))
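# Minimal round-trip sketch (illustrative only; it uses the same Molecule API calls
# exercised by the tests above and assumes the import used at the top of this module):
#
#     molecule = Molecule().fromSMILES('C=CC=C[CH]C')
#     print molecule.toAdjacencyList()                             # current u/p/c format
#     print molecule.toAdjacencyList(removeH=True, oldStyle=True)  # legacy format, heavy atoms only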
|
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2022
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import warnings
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, FR
from holidays.constants import JAN, MAR, APR, MAY, JUN, JUL, AUG, OCT, NOV, DEC
from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
from holidays.holiday_base import HolidayBase
class UnitedKingdom(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_the_United_Kingdom
# This class is extended by other countries (Ireland, Isle of Man, ...)
# It must be taken into account when adding or modifying holidays.
# Look at _country_specific() method for country specific behavior.
country = "UK"
def __init__(self, **kwargs):
# default state to UK
if "state" not in kwargs:
kwargs["state"] = "UK"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
if year >= 1974:
name = "New Year's Day"
self[date(year, JAN, 1)] = name
if self.observed and date(year, JAN, 1).weekday() == SUN:
self[date(year, JAN, 1) + rd(days=+1)] = name + " (Observed)"
elif self.observed and date(year, JAN, 1).weekday() == SAT:
self[date(year, JAN, 1) + rd(days=+2)] = name + " (Observed)"
# New Year Holiday
if self.state in ("UK", "Scotland"):
name = "New Year Holiday"
if self.state == "UK":
name += " [Scotland]"
self[date(year, JAN, 2)] = name
if self.observed and date(year, JAN, 2).weekday() in WEEKEND:
self[date(year, JAN, 2) + rd(days=+2)] = name + " (Observed)"
elif self.observed and date(year, JAN, 2).weekday() == MON:
self[date(year, JAN, 2) + rd(days=+1)] = name + " (Observed)"
# St. Patrick's Day
if self.state in ("UK", "Northern Ireland"):
name = "St. Patrick's Day"
if self.state == "UK":
name += " [Northern Ireland]"
self[date(year, MAR, 17)] = name
if self.observed and date(year, MAR, 17).weekday() in WEEKEND:
self[date(year, MAR, 17) + rd(weekday=MO)] = (
name + " (Observed)"
)
# TT bank holiday (first Friday in June)
if self.state == "Isle of Man":
self[date(year, JUN, 1) + rd(weekday=FR)] = "TT Bank Holiday"
# Tynwald Day
if self.state == "Isle of Man":
self[date(year, JUL, 5)] = "Tynwald Day"
# Battle of the Boyne
if self.state in ("UK", "Northern Ireland"):
name = "Battle of the Boyne"
if self.state == "UK":
name += " [Northern Ireland]"
self[date(year, JUL, 12)] = name
# Summer bank holiday (first Monday in August)
if self.state in ("UK", "Scotland"):
name = "Summer Bank Holiday"
if self.state == "UK":
name += " [Scotland]"
self[date(year, AUG, 1) + rd(weekday=MO)] = name
# St. Andrew's Day
if self.state in ("UK", "Scotland"):
name = "St. Andrew's Day"
if self.state == "UK":
name += " [Scotland]"
self[date(year, NOV, 30)] = name
# Christmas Day
name = "Christmas Day"
self[date(year, DEC, 25)] = name
if self.observed and date(year, DEC, 25).weekday() == SAT:
self[date(year, DEC, 27)] = name + " (Observed)"
elif self.observed and date(year, DEC, 25).weekday() == SUN:
self[date(year, DEC, 27)] = name + " (Observed)"
# Overwrite to modify country specific holidays
self._country_specific(year)
def _country_specific(self, year):
# UnitedKingdom exclusive holidays
# Good Friday
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
# Easter Monday
if self.state != "Scotland":
name = "Easter Monday"
if self.state == "UK":
name += " [England/Wales/Northern Ireland]"
self[easter(year) + rd(weekday=MO)] = name
# May Day bank holiday (first Monday in May)
if year >= 1978:
name = "May Day"
if year == 2020:
# Moved to Friday to mark 75th anniversary of VE Day.
self[date(year, MAY, 8)] = name
else:
if year == 1995:
dt = date(year, MAY, 8)
else:
dt = date(year, MAY, 1)
if dt.weekday() == MON:
self[dt] = name
elif dt.weekday() == TUE:
self[dt + rd(days=+6)] = name
elif dt.weekday() == WED:
self[dt + rd(days=+5)] = name
elif dt.weekday() == THU:
self[dt + rd(days=+4)] = name
elif dt.weekday() == FRI:
self[dt + rd(days=+3)] = name
elif dt.weekday() == SAT:
self[dt + rd(days=+2)] = name
elif dt.weekday() == SUN:
self[dt + rd(days=+1)] = name
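# Design note: the weekday ladder above simply rolls the anchor date forward to the
# following Monday (leaving it unchanged when it already falls on a Monday), which has
# the same effect as `dt + rd(weekday=MO)` used elsewhere in this module.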
# Spring bank holiday (last Monday in May)
name = "Spring Bank Holiday"
if year == 2012:
self[date(year, JUN, 4)] = name
elif year == 2022:
self[date(year, JUN, 2)] = name
elif year >= 1971:
self[date(year, MAY, 31) + rd(weekday=MO(-1))] = name
# Late Summer bank holiday (last Monday in August)
if self.state != "Scotland" and year >= 1971:
name = "Late Summer Bank Holiday"
if self.state == "UK":
name += " [England/Wales/Northern Ireland]"
self[date(year, AUG, 31) + rd(weekday=MO(-1))] = name
# Boxing Day
name = "Boxing Day"
self[date(year, DEC, 26)] = name
if self.observed and date(year, DEC, 26).weekday() == SAT:
self[date(year, DEC, 28)] = name + " (Observed)"
elif self.observed and date(year, DEC, 26).weekday() == SUN:
self[date(year, DEC, 28)] = name + " (Observed)"
# Special holidays
if year == 1977:
self[date(year, JUN, 7)] = "Silver Jubilee of Elizabeth II"
elif year == 1981:
self[date(year, JUL, 29)] = "Wedding of Charles and Diana"
elif year == 1999:
self[date(year, DEC, 31)] = "Millennium Celebrations"
elif year == 2002:
self[date(year, JUN, 3)] = "Golden Jubilee of Elizabeth II"
elif year == 2011:
self[date(year, APR, 29)] = "Wedding of William and Catherine"
elif year == 2012:
self[date(year, JUN, 5)] = "Diamond Jubilee of Elizabeth II"
elif year == 2022:
self[date(year, JUN, 3)] = "Platinum Jubilee of Elizabeth II"
class UK(UnitedKingdom):
pass
class GB(UnitedKingdom):
pass
class GBR(UnitedKingdom):
pass
class England(UnitedKingdom):
def __init__(self, **kwargs):
warnings.warn(
"England is deprecated, use UK(state='England') instead.",
DeprecationWarning,
)
kwargs["state"] = "England"
UnitedKingdom.__init__(self, **kwargs)
class Wales(UnitedKingdom):
def __init__(self, **kwargs):
warnings.warn(
"Wales is deprecated, use UK(state='Wales') instead.",
DeprecationWarning,
)
kwargs["state"] = "Wales"
UnitedKingdom.__init__(self, **kwargs)
class Scotland(UnitedKingdom):
def __init__(self, **kwargs):
warnings.warn(
"Scotland is deprecated, use UK(state='Scotland') instead.",
DeprecationWarning,
)
kwargs["state"] = "Scotland"
UnitedKingdom.__init__(self, **kwargs)
class IsleOfMan(UnitedKingdom):
def __init__(self, **kwargs):
warnings.warn(
"IsleOfMan is deprecated, use UK(state='Isle of Man') instead.",
DeprecationWarning,
)
kwargs["state"] = "Isle of Man"
UnitedKingdom.__init__(self, **kwargs)
class NorthernIreland(UnitedKingdom):
def __init__(self, **kwargs):
warnings.warn(
"Northern Ireland is deprecated, use UK(state='Northern Ireland') "
"instead.",
DeprecationWarning,
)
kwargs["state"] = "Northern Ireland"
UnitedKingdom.__init__(self, **kwargs)
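# Minimal usage sketch (illustrative only; it relies on the dict-like HolidayBase
# interface provided by python-holidays, not on anything defined in this module):
#
#     uk_holidays = UnitedKingdom(years=[2022], state="Scotland")
#     date(2022, 1, 3) in uk_holidays        # True: New Year's Day (Observed), Jan 1 2022 is a Saturday
#     uk_holidays.get(date(2022, 6, 3))      # "Platinum Jubilee of Elizabeth II"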
|
|
import datetime
import re
import hashlib
import random
from django.db import models, transaction
from django.utils.timezone import now as datetime_now
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.contrib import auth
from django.conf import settings
SHA1_RE = re.compile('^[a-f0-9]{40}$')
# ------------------------------------------------------------------------------------------------ #
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding ``User`` if valid.
If the key is valid and has not expired, return the ``User`` after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active, return ``False``.
To prevent reactivation of an account which has been deactivated by site administrators,
the activation key is reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
# TODO: consider logging failed activation-key lookups here
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password, site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
user = auth.get_user_model()
new_user = user.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.atomic(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except auth.get_user_model().DoesNotExist:
profile.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime_now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this ``RegistrationProfile``.
The activation email will make use of two templates:
``accounts/activation_email_subject.txt``
This template will be used for the subject line of the email. Because it is used as
the subject line of an email, this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined into only a single line.
``accounts/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may be activated.
``site``
An object representing the site on which the user registered; depending on whether
``django.contrib.sites`` is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites application is installed) or
``django.contrib.sites.models.RequestSite`` (if not). Consult the documentation
for the Django sites framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('accounts/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('accounts/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
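# Minimal usage sketch (hypothetical view code; it assumes a configured Django project
# with ACCOUNT_ACTIVATION_DAYS set and the accounts/* email templates in place):
#
#     from django.contrib.sites.shortcuts import get_current_site
#
#     def register(request):
#         ...
#         new_user = RegistrationProfile.objects.create_inactive_user(
#             username, email, password, site=get_current_site(request))
#         ...
#
#     def activate(request, activation_key):
#         user = RegistrationProfile.objects.activate_user(activation_key)
#         if user:
#             ...  # activation succeeded; user.is_active is now True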
|
|
# Copyright (C) 2013 eBay Inc.
# Copyright (C) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for qaulity_of_service_specs table."""
import time
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def fake_qos_specs_get_by_name(context, name, session=None, inactive=False):
pass
class QualityOfServiceSpecsTableTestCase(test.TestCase):
"""Test case for QualityOfServiceSpecs model."""
def setUp(self):
super(QualityOfServiceSpecsTableTestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
def tearDown(self):
super(QualityOfServiceSpecsTableTestCase, self).tearDown()
def _create_qos_specs(self, name, values=None):
"""Create a transfer object."""
if values:
specs = dict(name=name, qos_specs=values)
else:
specs = {'name': name,
'qos_specs': {
'consumer': 'back-end',
'key1': 'value1',
'key2': 'value2'}}
return db.qos_specs_create(self.ctxt, specs)['id']
def test_qos_specs_create(self):
# If there is qos specs with the same name exists,
# a QoSSpecsExists exception will be raised.
name = 'QoSSpecsCreationTest'
self._create_qos_specs(name)
self.assertRaises(exception.QoSSpecsExists,
db.qos_specs_create, self.ctxt, dict(name=name))
specs_id = self._create_qos_specs('NewName')
query_id = db.qos_specs_get_by_name(
self.ctxt, 'NewName')['id']
self.assertEqual(specs_id, query_id)
def test_qos_specs_get(self):
value = dict(consumer='front-end',
key1='foo', key2='bar')
specs_id = self._create_qos_specs('Name1', value)
fake_id = 'fake-UUID'
self.assertRaises(exception.QoSSpecsNotFound,
db.qos_specs_get, self.ctxt, fake_id)
specs = db.qos_specs_get(self.ctxt, specs_id)
expected = dict(name='Name1', id=specs_id, consumer='front-end')
del value['consumer']
expected.update(dict(specs=value))
self.assertDictMatch(specs, expected)
def test_qos_specs_get_all(self):
value1 = dict(consumer='front-end',
key1='v1', key2='v2')
value2 = dict(consumer='back-end',
key3='v3', key4='v4')
value3 = dict(consumer='back-end',
key5='v5', key6='v6')
spec_id1 = self._create_qos_specs('Name1', value1)
spec_id2 = self._create_qos_specs('Name2', value2)
spec_id3 = self._create_qos_specs('Name3', value3)
specs = db.qos_specs_get_all(self.ctxt)
self.assertEqual(len(specs), 3,
"Unexpected number of qos specs records")
expected1 = dict(name='Name1', id=spec_id1, consumer='front-end')
expected2 = dict(name='Name2', id=spec_id2, consumer='back-end')
expected3 = dict(name='Name3', id=spec_id3, consumer='back-end')
del value1['consumer']
del value2['consumer']
del value3['consumer']
expected1.update(dict(specs=value1))
expected2.update(dict(specs=value2))
expected3.update(dict(specs=value3))
self.assertIn(expected1, specs)
self.assertIn(expected2, specs)
self.assertIn(expected3, specs)
def test_qos_specs_get_by_name(self):
name = str(int(time.time()))
value = dict(consumer='front-end',
foo='Foo', bar='Bar')
specs_id = self._create_qos_specs(name, value)
specs = db.qos_specs_get_by_name(self.ctxt, name)
del value['consumer']
expected = {'name': name,
'id': specs_id,
'consumer': 'front-end',
'specs': value}
self.assertDictMatch(specs, expected)
def test_qos_specs_delete(self):
name = str(int(time.time()))
specs_id = self._create_qos_specs(name)
db.qos_specs_delete(self.ctxt, specs_id)
self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get,
self.ctxt, specs_id)
def test_qos_specs_item_delete(self):
name = str(int(time.time()))
value = dict(consumer='front-end',
foo='Foo', bar='Bar')
specs_id = self._create_qos_specs(name, value)
del value['consumer']
del value['foo']
expected = {'name': name,
'id': specs_id,
'consumer': 'front-end',
'specs': value}
db.qos_specs_item_delete(self.ctxt, specs_id, 'foo')
specs = db.qos_specs_get_by_name(self.ctxt, name)
self.assertDictMatch(specs, expected)
def test_associate_type_with_qos(self):
self.assertRaises(exception.VolumeTypeNotFound,
db.volume_type_qos_associate,
self.ctxt, 'Fake-VOLID', 'Fake-QOSID')
type_id = volume_types.create(self.ctxt, 'TypeName')['id']
specs_id = self._create_qos_specs('FakeQos')
db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['id'], type_id)
self.assertEqual(res[0]['qos_specs_id'], specs_id)
def test_qos_associations_get(self):
self.assertRaises(exception.QoSSpecsNotFound,
db.qos_specs_associations_get,
self.ctxt, 'Fake-UUID')
type_id = volume_types.create(self.ctxt, 'TypeName')['id']
specs_id = self._create_qos_specs('FakeQos')
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 0)
db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['id'], type_id)
self.assertEqual(res[0]['qos_specs_id'], specs_id)
type0_id = volume_types.create(self.ctxt, 'Type0Name')['id']
db.volume_type_qos_associate(self.ctxt, type0_id, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['qos_specs_id'], specs_id)
self.assertEqual(res[1]['qos_specs_id'], specs_id)
def test_qos_specs_disassociate(self):
type_id = volume_types.create(self.ctxt, 'TypeName')['id']
specs_id = self._create_qos_specs('FakeQos')
db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(res[0]['id'], type_id)
self.assertEqual(res[0]['qos_specs_id'], specs_id)
db.qos_specs_disassociate(self.ctxt, specs_id, type_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 0)
res = db.volume_type_get(self.ctxt, type_id)
self.assertEqual(res['qos_specs_id'], None)
def test_qos_specs_disassociate_all(self):
specs_id = self._create_qos_specs('FakeQos')
type1_id = volume_types.create(self.ctxt, 'Type1Name')['id']
type2_id = volume_types.create(self.ctxt, 'Type2Name')['id']
type3_id = volume_types.create(self.ctxt, 'Type3Name')['id']
db.volume_type_qos_associate(self.ctxt, type1_id, specs_id)
db.volume_type_qos_associate(self.ctxt, type2_id, specs_id)
db.volume_type_qos_associate(self.ctxt, type3_id, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 3)
db.qos_specs_disassociate_all(self.ctxt, specs_id)
res = db.qos_specs_associations_get(self.ctxt, specs_id)
self.assertEqual(len(res), 0)
def test_qos_specs_update(self):
name = 'FakeName'
specs_id = self._create_qos_specs(name)
value = dict(key2='new_value2', key3='value3')
self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update,
self.ctxt, 'Fake-UUID', value)
db.qos_specs_update(self.ctxt, specs_id, value)
specs = db.qos_specs_get(self.ctxt, specs_id)
self.assertEqual(specs['specs']['key2'], 'new_value2')
self.assertEqual(specs['specs']['key3'], 'value3')
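# For reference, the record shape these assertions rely on (as returned by
# db.qos_specs_get and db.qos_specs_get_by_name) is roughly:
#     {'name': <str>, 'id': <uuid str>, 'consumer': 'front-end' | 'back-end',
#      'specs': {<key>: <value>, ...}}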
|
|
import unittest
import os
import subprocess
import logging
import re
import time
import json
import requests
import xmlrunner
from pyinfraboxutils.db import connect_db
from pyinfraboxutils.token import encode_project_token
from pyinfraboxutils.secrets import encrypt_secret
class Test(unittest.TestCase):
job_id = '1514af82-3c4f-4bb5-b1da-a89a0ced5e6f'
user_id = '2514af82-3c4f-4bb5-b1da-a89a0ced5e6b'
build_id = '3514af82-3c4f-4bb5-b1da-a89a0ced5e6a'
project_id = '4514af82-3c4f-4bb5-b1da-a89a0ced5e6f'
token_id = '5514af82-3c4f-4bb5-b1da-a89a0ced5e6f'
def setUp(self):
conn = connect_db()
cur = conn.cursor()
cur.execute('''DELETE FROM job''')
cur.execute('''DELETE FROM auth_token''')
cur.execute('''DELETE FROM collaborator''')
cur.execute('''DELETE FROM project''')
cur.execute('''DELETE FROM "user"''')
cur.execute('''DELETE FROM source_upload''')
cur.execute('''DELETE FROM build''')
cur.execute('''DELETE FROM test_run''')
cur.execute('''DELETE FROM job_stat''')
cur.execute('''DELETE FROM measurement''')
cur.execute('''DELETE FROM test''')
cur.execute('''DELETE FROM job_markup''')
cur.execute('''DELETE FROM secret''')
cur.execute('''INSERT INTO "user"(id, github_id, avatar_url, name,
email, github_api_token, username)
VALUES(%s, 1, 'avatar', 'name', 'email', 'token', 'login')''', (self.user_id,))
cur.execute('''INSERT INTO project(name, type, id, public)
VALUES('test', 'upload', %s, true)''', (self.project_id,))
cur.execute('''INSERT INTO collaborator(project_id, user_id, owner)
VALUES(%s, %s, true)''', (self.project_id, self.user_id,))
cur.execute('''INSERT INTO auth_token(project_id, id, description, scope_push, scope_pull)
VALUES(%s, %s, 'asd', true, true)''', (self.project_id, self.token_id,))
cur.execute('''INSERT INTO secret(project_id, name, value)
VALUES(%s, 'SECRET_ENV', %s)''', (self.project_id, encrypt_secret('hello world')))
conn.commit()
os.environ['INFRABOX_CLI_TOKEN'] = encode_project_token(self.token_id, self.project_id)
print os.environ['INFRABOX_CLI_TOKEN']
self.root_url = os.environ['INFRABOX_ROOT_URL']
def _api_get(self, url):
headers = {'Authorization': 'bearer ' + os.environ['INFRABOX_CLI_TOKEN']}
retries = 600
while True:
try:
return requests.get(url, headers=headers, verify=False)
except Exception as e:
logging.exception(e)
time.sleep(1)
retries -= 1
if retries < 0:
raise e
def _get_build(self):
url = '%s/api/v1/projects/%s/builds/' % (self.root_url, self.project_id)
result = self._api_get(url).json()
return result[0]
def _get_jobs(self):
build = self._get_build()
url = '%s/api/v1/projects/%s/builds/%s/jobs/' % (self.root_url, self.project_id, build['id'])
jobs = self._api_get(url).json()
return jobs
def _wait_build(self):
while True:
time.sleep(5)
jobs = self._get_jobs()
active = False
for j in jobs:
if j['state'] not in ('finished', 'error', 'killed', 'skipped', 'failure'):
active = True
if not active:
return
def _print_job_logs(self):
self._wait_build()
jobs = self._get_jobs()
for j in jobs:
url = '%s/api/v1/projects/%s/jobs/%s/console' % (self.root_url,
self.project_id,
j['id'])
r = self._api_get(url)
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
logs = ansi_escape.sub('', r.text)
print logs
def _get_job(self, job_name):
jobs = self._get_jobs()
for j in jobs:
data = json.dumps(j, indent=4)
if j['name'] == job_name:
return j
data = json.dumps(jobs, indent=4)
raise Exception('Job "%s" not found in: %s' % (job_name, data))
def _wait_job(self, job_name):
while True:
j = self._get_job(job_name)
if j['state'] in ('finished', 'error', 'killed', 'skipped', 'failure'):
return j
time.sleep(5)
def expect_job(self, job_name, state='finished', message=None, parents=None, dockerfile=None):
j = self._get_job(job_name)
data = json.dumps(j, indent=4)
self.assertEqual(j['state'], state, data)
if message:
self.assertIn(message, j['message'], data)
if dockerfile:
self.assertEqual(j['docker_file'], dockerfile, data)
if parents:
actual_parents = {}
for p in j.get('depends_on', []):
actual_parents[p['job']] = p
for p in parents:
self.assertTrue(p in actual_parents, data)
def run_it(self, cwd):
command = ['infrabox', '--ca-bundle', 'false', 'push']
output = None
try:
output = subprocess.check_output(command, cwd=cwd)
except subprocess.CalledProcessError as e:
output = e.output
print output
self._print_job_logs()
def test_docker_job(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_job')
self.expect_job('test')
def test_docker_multiple_jobs(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_multiple_jobs')
self.expect_job('test-1', parents=['Create Jobs'])
self.expect_job('test-2', parents=['Create Jobs'])
self.expect_job('test-3', parents=['Create Jobs'])
self.expect_job('test-4', parents=['test-1', 'test-2'])
self.expect_job('test-5', parents=['test-2', 'test-3'])
def test_workflow_nested(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/workflow_nested')
self.expect_job('flow', parents=['flow/sub-2', 'flow/sub-3'])
self.expect_job('flow/sub-1', parents=['Create Jobs'], dockerfile='Dockerfile_flow')
self.expect_job('flow/sub-2', parents=['flow/sub-2/nested-2', 'flow/sub-2/nested-3'])
self.expect_job('flow/sub-2/nested-1',
parents=['flow/sub-1'],
dockerfile='Dockerfile_nested')
self.expect_job('flow/sub-2/nested-2',
parents=['flow/sub-2/nested-1'],
dockerfile='Dockerfile_nested')
self.expect_job('flow/sub-2/nested-3',
parents=['flow/sub-2/nested-1'],
dockerfile='Dockerfile_nested')
self.expect_job('flow/sub-3',
parents=['flow/sub-1'],
dockerfile='Dockerfile_flow')
def test_docker_compose_job(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_compose_job')
self.expect_job('test')
def test_docker_job_archive(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_job_archive')
self.expect_job('test')
def test_docker_compose_invalid_compose_file(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_compose_invalid_compose_file')
self.expect_job('Create Jobs',
state='failure',
message='version not found')
def test_failed_job(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/failed_job')
self.expect_job('test', state='failure')
def test_malicious_job(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/malicious_job')
self.expect_job('test')
def test_workflow_recursive(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/workflow_recursive')
self.expect_job('Create Jobs', state='failure', message='Recursive include detected')
def test_workflow_simple_job(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/workflow_simple_job')
self.expect_job('flow', parents=['flow/test-sub'])
self.expect_job('flow/test-sub', parents=['Create Jobs'])
def test_image_input_output(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_image_input_output')
self.expect_job('consumer')
def test_input_output(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_input_output')
self.expect_job('consumer')
def test_infrabox_context(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/infrabox_context')
self.expect_job('root')
self.expect_job('sub1')
self.expect_job('sub1/sub1')
def test_secure_env(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_secure_env')
self.expect_job('test')
def test_secure_env_not_found(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_secure_env_not_found')
self.expect_job('Create Jobs', state='failure', message="Secret 'UNKNOWN_SECRET' not found")
def test_insecure_env(self):
self.run_it('/infrabox/context/infrabox/test/e2e/tests/docker_insecure_env')
self.expect_job('test')
def main():
root_url = os.environ['INFRABOX_ROOT_URL']
print "ROOT_URL: %s" % root_url
while True:
time.sleep(1)
r = None
try:
r = requests.get(root_url, verify=False)
if r.status_code in (200, 404):
break
print r.text
except Exception as e:
print e
print "Server not yet ready"
print "Connecting to DB"
connect_db() # Wait for DB
print "Starting tests"
with open('results.xml', 'wb') as output:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), buffer=False)
if __name__ == '__main__':
main()
|
|
import warnings
import re
from flask import (request, redirect, flash, abort, json, Response,
get_flashed_messages)
from jinja2 import contextfunction
from wtforms.fields import HiddenField
from wtforms.fields.core import UnboundField
from wtforms.validators import ValidationError, Required
from flask_admin.babel import gettext
from flask_admin.base import BaseView, expose
from flask_admin.form import BaseForm, FormOpts, rules
from flask_admin.model import filters, typefmt
from flask_admin.actions import ActionsMixin
from flask_admin.helpers import (get_form_data, validate_form_on_submit,
get_redirect_target, flash_errors)
from flask_admin.tools import rec_getattr
from flask_admin._backwards import ObsoleteAttr
from flask_admin._compat import iteritems, OrderedDict, as_unicode
from .helpers import prettify_name, get_mdict_item_or_list
from .ajax import AjaxModelLoader
from .fields import ListEditableFieldList
# Used to generate filter query string name
filter_char_re = re.compile('[^a-z0-9 ]')
filter_compact_re = re.compile(' +')
class ViewArgs(object):
"""
List view arguments.
"""
def __init__(self, page=None, sort=None, sort_desc=None, search=None, filters=None, extra_args=None):
self.page = page
self.sort = sort
self.sort_desc = bool(sort_desc)
self.search = search
self.filters = filters
if not self.search:
self.search = None
self.extra_args = extra_args or dict()
def clone(self, **kwargs):
if self.filters:
flt = list(self.filters)
else:
flt = None
kwargs.setdefault('page', self.page)
kwargs.setdefault('sort', self.sort)
kwargs.setdefault('sort_desc', self.sort_desc)
kwargs.setdefault('search', self.search)
kwargs.setdefault('filters', flt)
kwargs.setdefault('extra_args', dict(self.extra_args))
return ViewArgs(**kwargs)
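# Minimal usage sketch (illustrative only): `clone` lets list-view helpers build
# variations of the current request arguments without mutating the original, e.g.
#
#     view_args = ViewArgs(page=0, sort=1, sort_desc=True, search='john')
#     next_page_args = view_args.clone(page=view_args.page + 1)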
class BaseModelView(BaseView, ActionsMixin):
"""
Base model view.
This view does not make any assumptions on how models are stored or managed, but expects the following:
1. The provided model is an object
2. The model contains properties
3. Each model contains an attribute which uniquely identifies it (i.e. a primary key for a database model)
4. It is possible to retrieve a list of sorted models with pagination applied from a data source
5. You can get one model by its identifier from the data source
Essentially, if you want to support a new data store, all you have to do is:
1. Derive from the `BaseModelView` class
2. Implement various data-related methods (`get_list`, `get_one`, `create_model`, etc)
3. Implement automatic form generation from the model representation (`scaffold_form`)
"""
# Permissions
can_create = True
"""Is model creation allowed"""
can_edit = True
"""Is model editing allowed"""
can_delete = True
"""Is model deletion allowed"""
# Templates
list_template = 'admin/model/list.html'
"""Default list view template"""
edit_template = 'admin/model/edit.html'
"""Default edit template"""
create_template = 'admin/model/create.html'
"""Default create template"""
# Customizations
column_list = ObsoleteAttr('column_list', 'list_columns', None)
"""
Collection of the model field names for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_list = ('name', 'last_name', 'email')
"""
column_exclude_list = ObsoleteAttr('column_exclude_list',
'excluded_list_columns', None)
"""
Collection of excluded list column names.
For example::
class MyModelView(BaseModelView):
column_exclude_list = ('last_name', 'email')
"""
column_formatters = ObsoleteAttr('column_formatters', 'list_formatters', dict())
"""
Dictionary of list view column formatters.
For example, if you want to show price multiplied by
two, you can do something like this::
class MyModelView(BaseModelView):
column_formatters = dict(price=lambda v, c, m, p: m.price*2)
or using Jinja2 `macro` in template::
from flask_admin.model.template import macro
class MyModelView(BaseModelView):
column_formatters = dict(price=macro('render_price'))
# in template
{% macro render_price(model, column) %}
{{ model.price * 2 }}
{% endmacro %}
The Callback function has the prototype::
def formatter(view, context, model, name):
# `view` is current administrative view
# `context` is instance of jinja2.runtime.Context
# `model` is model instance
# `name` is property name
pass
"""
column_type_formatters = ObsoleteAttr('column_type_formatters', 'list_type_formatters', None)
"""
Dictionary of value type formatters to be used in the list view.
By default, two types are formatted:
1. ``None`` will be displayed as an empty string
2. ``bool`` will be displayed as a checkmark if it is ``True``
If you don't like the default behavior and don't want any type formatters
applied, just override this property with an empty dictionary::
class MyModelView(BaseModelView):
column_type_formatters = dict()
If you want to display `NULL` instead of an empty string, you can do
something like this::
from flask_admin.model import typefmt
MY_DEFAULT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)
MY_DEFAULT_FORMATTERS.update({
type(None): typefmt.null_formatter
})
class MyModelView(BaseModelView):
column_type_formatters = MY_DEFAULT_FORMATTERS
Type formatters have lower priority than list column formatters.
The callback function has following prototype::
def type_formatter(view, value):
# `view` is current administrative view
# `value` value to format
pass
"""
column_labels = ObsoleteAttr('column_labels', 'rename_columns', None)
"""
Dictionary where key is column name and value is string to display.
For example::
class MyModelView(BaseModelView):
column_labels = dict(name='Name', last_name='Last Name')
"""
column_descriptions = None
"""
Dictionary where key is column name and
value is description for `list view` column or add/edit form field.
For example::
class MyModelView(BaseModelView):
column_descriptions = dict(
full_name='First and Last name'
)
"""
column_sortable_list = ObsoleteAttr('column_sortable_list',
'sortable_columns',
None)
"""
Collection of the sortable columns for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_sortable_list = ('name', 'last_name')
If you want to explicitly specify field/column to be used while
sorting, you can use a tuple::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', 'user.username'))
When using SQLAlchemy models, model attributes can be used instead
of strings::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', User.username))
"""
column_default_sort = None
"""
Default sort column if no sorting is applied.
Example::
class MyModelView(BaseModelView):
column_default_sort = 'user'
You can use a tuple to control ascending/descending order. In the following example, items
will be sorted in descending order::
class MyModelView(BaseModelView):
column_default_sort = ('user', True)
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
A collection of the searchable columns. It is assumed that only
text-only fields are searchable, but it is up to the model
implementation to decide.
Example::
class MyModelView(BaseModelView):
column_searchable_list = ('name', 'email')
"""
column_editable_list = None
"""
Collection of the columns which can be edited from the list view.
For example::
class MyModelView(BaseModelView):
column_editable_list = ('name', 'last_name')
"""
column_choices = None
"""
Map choices to columns in list view
Example::
class MyModelView(BaseModelView):
column_choices = {
'my_column': [
('db_value', 'display_value'),
]
}
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of :class:`~flask_admin.model.filters.BaseFilter` classes.
Example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
"""
named_filter_urls = False
"""
Set to True to use human-readable names for filters in URL parameters.
False by default so as to be robust across translations.
Changing this parameter will break any existing URLs that have filters.
"""
column_display_pk = ObsoleteAttr('column_display_pk',
'list_display_pk',
False)
"""
Controls if the primary key should be displayed in the list view.
"""
form = None
"""
Form class. Override if you want to use custom form for your model.
Will completely disable form scaffolding functionality.
For example::
class MyForm(Form):
name = StringField('Name')
class MyModelView(BaseModelView):
form = MyForm
"""
form_base_class = BaseForm
"""
Base form class. Will be used by the form scaffolding function when creating the model form.
Useful if you want to have a custom constructor or override some fields.
Example::
class MyBaseForm(Form):
def do_something(self):
pass
class MyModelView(BaseModelView):
form_base_class = MyBaseForm
"""
form_args = None
"""
Dictionary of form field arguments. Refer to WTForms documentation for
list of possible options.
Example::
from wtforms.validators import required
class MyModelView(BaseModelView):
form_args = dict(
name=dict(label='First Name', validators=[required()])
)
"""
form_columns = None
"""
Collection of the model field names for the form. If set to `None` will
get them from the model.
Example::
class MyModelView(BaseModelView):
form_columns = ('name', 'email')
"""
form_excluded_columns = ObsoleteAttr('form_excluded_columns',
'excluded_form_columns',
None)
"""
Collection of excluded form field names.
For example::
class MyModelView(BaseModelView):
form_excluded_columns = ('last_name', 'email')
"""
form_overrides = None
"""
Dictionary of form column overrides.
Example::
class MyModelView(BaseModelView):
form_overrides = dict(name=wtf.FileField)
"""
form_widget_args = None
"""
Dictionary of form widget rendering arguments.
Use this to customize how widget is rendered without using custom template.
Example::
class MyModelView(BaseModelView):
form_widget_args = {
'description': {
'rows': 10,
'style': 'color: black'
}
}
Changing the format of a DateTimeField will require changes to both form_widget_args and form_args.
Example::
form_args = dict(
start=dict(format='%Y-%m-%d %I:%M %p') # changes how the input is parsed by strptime (12 hour time)
)
form_widget_args = dict(
start={'data-date-format': u'yyyy-mm-dd HH:ii P', 'data-show-meridian': 'True'} # changes how the DateTimeField displays the time
)
"""
form_extra_fields = None
"""
Dictionary of additional fields.
Example::
class MyModelView(BaseModelView):
form_extra_fields = {
password: PasswordField('Password')
}
You can control order of form fields using ``form_columns`` property. For example::
class MyModelView(BaseModelView):
form_columns = ('name', 'email', 'password', 'secret')
form_extra_fields = {
password: PasswordField('Password')
}
In this case, password field will be put between email and secret fields that are autogenerated.
"""
form_ajax_refs = None
"""
Use AJAX for foreign key model loading.
Should contain dictionary, where key is field name and value is either a dictionary which
configures AJAX lookups or backend-specific `AjaxModelLoader` class instance.
For example, it can look like::
class MyModelView(BaseModelView):
form_ajax_refs = {
'user': {
'fields': ('first_name', 'last_name', 'email')
'page_size': 10
}
}
Or with SQLAlchemy backend like this::
class MyModelView(BaseModelView):
form_ajax_refs = {
'user': QueryAjaxModelLoader('user', db.session, User, fields=['email'], page_size=10)
}
If you need custom loading functionality, you can implement your custom loading behavior
in your `AjaxModelLoader` class.
"""
form_rules = None
"""
List of rendering rules for model creation form.
This property changes the default form rendering behavior and makes it possible to rearrange the
order of rendered fields, add text between fields, group them, etc. If not set, the default
Flask-Admin form rendering logic will be used.
Here's a simple example which illustrates how to use it::
from flask_admin.form import rules
class MyModelView(ModelView):
form_rules = [
# Define field set with header text and four fields
rules.FieldSet(('first_name', 'last_name', 'email', 'phone'), 'User'),
# ... and it is just shortcut for:
rules.Header('User'),
rules.Field('first_name'),
rules.Field('last_name'),
# ...
# It is possible to create custom rule blocks:
MyBlock('Hello World'),
# It is possible to call macros from current context
rules.Macro('my_macro', foobar='baz')
]
"""
form_edit_rules = None
"""
Customized rules for the edit form. Override `form_rules` if present.
"""
form_create_rules = None
"""
Customized rules for the create form. Override `form_rules` if present.
"""
# Actions
action_disallowed_list = ObsoleteAttr('action_disallowed_list',
'disallowed_actions',
[])
"""
Set of disallowed action names. For example, if you want to disable
mass model deletion, do something like this:
class MyModelView(BaseModelView):
action_disallowed_list = ['delete']
"""
# Various settings
page_size = 20
"""
Default page size for pagination.
"""
def __init__(self, model,
name=None, category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param model:
Model class
:param name:
View name. If not provided, will use the model class name
:param category:
View category
:param endpoint:
Base endpoint. If not provided, will use the model name + 'view'.
For example if model name was 'User', endpoint will be
'userview'
:param url:
Base URL. If not provided, will use endpoint as a URL.
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
# If name not provided, it is model name
if name is None:
name = '%s' % self._prettify_class_name(model.__name__)
# If endpoint not provided, it is model name
if endpoint is None:
endpoint = model.__name__.lower()
super(BaseModelView, self).__init__(name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self.model = model
# Actions
self.init_actions()
# Scaffolding
self._refresh_cache()
# Caching
def _refresh_forms_cache(self):
# Forms
self._form_ajax_refs = self._process_ajax_references()
if self.form_widget_args is None:
self.form_widget_args = {}
self._create_form_class = self.get_create_form()
self._edit_form_class = self.get_edit_form()
self._delete_form_class = self.get_delete_form()
# List View In-Line Editing
if self.column_editable_list:
self._list_form_class = self.get_list_form()
else:
self.column_editable_list = {}
def _refresh_filters_cache(self):
self._filters = self.get_filters()
if self._filters:
self._filter_groups = OrderedDict()
self._filter_args = {}
for i, flt in enumerate(self._filters):
if flt.name not in self._filter_groups:
self._filter_groups[flt.name] = []
self._filter_groups[flt.name].append({
'index': i,
'arg': self.get_filter_arg(i, flt),
'operation': as_unicode(flt.operation()),
'options': flt.get_options(self) or None,
'type': flt.data_type
})
self._filter_args[self.get_filter_arg(i, flt)] = (i, flt)
else:
self._filter_groups = None
self._filter_args = None
def _refresh_form_rules_cache(self):
if self.form_create_rules:
self._form_create_rules = rules.RuleSet(self, self.form_create_rules)
else:
self._form_create_rules = None
if self.form_edit_rules:
self._form_edit_rules = rules.RuleSet(self, self.form_edit_rules)
else:
self._form_edit_rules = None
if self.form_rules:
form_rules = rules.RuleSet(self, self.form_rules)
if not self._form_create_rules:
self._form_create_rules = form_rules
if not self._form_edit_rules:
self._form_edit_rules = form_rules
def _refresh_cache(self):
"""
Refresh various cached variables.
"""
# List view
self._list_columns = self.get_list_columns()
self._sortable_columns = self.get_sortable_columns()
# Labels
if self.column_labels is None:
self.column_labels = {}
# Forms
self._refresh_forms_cache()
# Search
self._search_supported = self.init_search()
# Choices
if self.column_choices:
self._column_choices_map = dict([
(column, dict(choices))
for column, choices in self.column_choices.items()
])
else:
self.column_choices = self._column_choices_map = dict()
# Type formatters
if self.column_type_formatters is None:
self.column_type_formatters = dict(typefmt.BASE_FORMATTERS)
if self.column_descriptions is None:
self.column_descriptions = dict()
# Filters
self._refresh_filters_cache()
# Form rendering rules
self._refresh_form_rules_cache()
# Process form rules
self._validate_form_class(self._form_edit_rules, self._edit_form_class)
self._validate_form_class(self._form_create_rules, self._create_form_class)
# Primary key
def get_pk_value(self, model):
"""
Return PK value from a model object.
"""
raise NotImplementedError()
# List view
def scaffold_list_columns(self):
"""
Return list of the model field names. Must be implemented in
the child class.
Expected return format is list of tuples with field name and
display text. For example::
['name', 'first_name', 'last_name']
"""
raise NotImplementedError('Please implement scaffold_list_columns method')
def get_column_name(self, field):
"""
Return a human-readable column name.
:param field:
Model field name.
"""
if self.column_labels and field in self.column_labels:
return self.column_labels[field]
else:
return self._prettify_name(field)
def get_list_columns(self):
"""
Returns a list of the model field names. If `column_list` was
set, returns it. Otherwise calls `scaffold_list_columns`
to generate the list from the model.
"""
columns = self.column_list
if columns is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_exclude_list:
columns = [c for c in columns if c not in self.column_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
def scaffold_sortable_columns(self):
"""
Returns dictionary of sortable columns. Must be implemented in
the child class.
Expected return format is a dictionary, where keys are field names and
values are property names.
"""
raise NotImplementedError('Please implement scaffold_sortable_columns method')
def get_sortable_columns(self):
"""
Returns a dictionary of the sortable columns. Key is a model
field name and value is sort column (for example - attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
if self.column_sortable_list is None:
return self.scaffold_sortable_columns() or dict()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
result[c[0]] = c[1]
else:
result[c] = c
return result
def init_search(self):
"""
Initialize search. If data provider does not support search,
`init_search` will return `False`.
"""
return False
# Filter helpers
def scaffold_filters(self, name):
"""
Generate filter object for the given name
:param name:
Name of the field
"""
return None
def is_valid_filter(self, filter):
"""
Verify that the provided filter object is valid.
Override in model backend implementation to verify if
the provided filter type is allowed.
:param filter:
Filter object to verify.
"""
return isinstance(filter, filters.BaseFilter)
def handle_filter(self, filter):
"""
Postprocess (add joins, etc) for a filter.
:param filter:
Filter object to postprocess
"""
return filter
def get_filters(self):
"""
Return a list of filter objects.
If your model backend implementation does not support filters,
override this method and return `None`.
"""
if self.column_filters:
collection = []
for n in self.column_filters:
if self.is_valid_filter(n):
collection.append(self.handle_filter(n))
else:
flt = self.scaffold_filters(n)
if flt:
collection.extend(flt)
else:
raise Exception('Unsupported filter type %s' % n)
return collection
else:
return None
def get_filter_arg(self, index, flt):
"""
Given a filter `flt`, return a unique name for that filter in
this view.
Does not include the `flt[n]_` portion of the filter name.
:param index:
Filter index in _filters array
:param flt:
Filter instance
"""
if self.named_filter_urls:
name = ('%s %s' % (flt.name, as_unicode(flt.operation()))).lower()
name = filter_char_re.sub('', name)
name = filter_compact_re.sub('_', name)
return name
else:
return str(index)
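# For example (illustrative): with `named_filter_urls = True`, a filter named
# "User Name" whose operation is "contains" yields the argument name
# "user_name_contains"; with the default setting the argument is just the
# filter's index as a string, e.g. "0".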
# Form helpers
def scaffold_form(self):
"""
Create `form.BaseForm` inherited class from the model. Must be
implemented in the child class.
"""
raise NotImplementedError('Please implement scaffold_form method')
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
Must be implemented in the child class.
"""
raise NotImplementedError('Please implement scaffold_list_form method')
def get_form(self):
"""
Get form class.
If ``self.form`` is set, will return it and will call
``self.scaffold_form`` otherwise.
Override to implement customized behavior.
"""
if self.form is not None:
return self.form
return self.scaffold_form()
def get_list_form(self):
"""
Get form class for the editable list view.
Uses only validators from `form_args` to build the form class.
Allows overriding the editable list view field/widget. For example::
from flask_admin.model.fields import ListEditableFieldList
from flask_admin.model.widgets import XEditableWidget
class CustomWidget(XEditableWidget):
def get_kwargs(self, subfield, kwargs):
if subfield.type == 'TextAreaField':
kwargs['data-type'] = 'textarea'
kwargs['data-rows'] = '20'
# elif: kwargs for other fields
return kwargs
class CustomFieldList(ListEditableFieldList):
widget = CustomWidget()
class MyModelView(BaseModelView):
def get_list_form(self):
return self.scaffold_list_form(CustomFieldList)
"""
if self.form_args:
# get only validators, other form_args can break FieldList wrapper
validators = dict(
(key, {'validators': value["validators"]})
for key, value in iteritems(self.form_args)
if value.get("validators")
)
else:
validators = None
return self.scaffold_list_form(validators=validators)
def get_create_form(self):
"""
Create form class for model creation view.
Override to implement customized behavior.
"""
return self.get_form()
def get_edit_form(self):
"""
Create form class for model editing view.
Override to implement customized behavior.
"""
return self.get_form()
def get_delete_form(self):
"""
Create form class for model delete view.
Override to implement customized behavior.
"""
class DeleteForm(self.form_base_class):
id = HiddenField(validators=[Required()])
url = HiddenField()
return DeleteForm
def create_form(self, obj=None):
"""
Instantiate model creation form and return it.
Override to implement custom behavior.
"""
return self._create_form_class(get_form_data(), obj=obj)
def edit_form(self, obj=None):
"""
Instantiate model editing form and return it.
Override to implement custom behavior.
"""
return self._edit_form_class(get_form_data(), obj=obj)
def delete_form(self):
"""
Instantiate model delete form and return it.
Override to implement custom behavior.
The delete form originally used a GET request, so delete_form
accepts both GET and POST requests for backwards compatibility.
"""
if request.form:
return self._delete_form_class(request.form)
elif request.args:
# allow request.args for backward compatibility
return self._delete_form_class(request.args)
else:
return self._delete_form_class()
def list_form(self, obj=None):
"""
Instantiate model editing form for list view and return it.
Override to implement custom behavior.
"""
return self._list_form_class(get_form_data(), obj=obj)
def validate_form(self, form):
"""
Validate the form on submit.
:param form:
Form to validate
"""
return validate_form_on_submit(form)
def _get_ruleset_missing_fields(self, ruleset, form):
missing_fields = []
if ruleset:
visible_fields = ruleset.visible_fields
for field in form:
if field.name not in visible_fields:
missing_fields.append(field.name)
return missing_fields
def _show_missing_fields_warning(self, text):
warnings.warn(text)
def _validate_form_class(self, ruleset, form_class, remove_missing=True):
form_fields = []
for name, obj in iteritems(form_class.__dict__):
if isinstance(obj, UnboundField):
form_fields.append(name)
missing_fields = []
if ruleset:
visible_fields = ruleset.visible_fields
for field_name in form_fields:
if field_name not in visible_fields:
missing_fields.append(field_name)
if missing_fields:
self._show_missing_fields_warning('Fields missing from ruleset: %s' % (','.join(missing_fields)))
if remove_missing:
self._remove_fields_from_form_class(missing_fields, form_class)
def _validate_form_instance(self, ruleset, form, remove_missing=True):
missing_fields = self._get_ruleset_missing_fields(ruleset=ruleset, form=form)
if missing_fields:
self._show_missing_fields_warning('Fields missing from ruleset: %s' % (','.join(missing_fields)))
if remove_missing:
self._remove_fields_from_form_instance(missing_fields, form)
def _remove_fields_from_form_instance(self, field_names, form):
for field_name in field_names:
form.__delitem__(field_name)
def _remove_fields_from_form_class(self, field_names, form_class):
for field_name in field_names:
delattr(form_class, field_name)
# Helpers
def is_sortable(self, name):
"""
Verify if column is sortable.
Not case-sensitive.
:param name:
Column name.
"""
return name.lower() in (x.lower() for x in self._sortable_columns)
def is_editable(self, name):
"""
Verify if column is editable.
:param name:
Column name.
"""
return name in self.column_editable_list
def _get_column_by_idx(self, idx):
"""
Return the list column for the given index, or None if the index is out of range.
"""
if idx is None or idx < 0 or idx >= len(self._list_columns):
return None
return self._list_columns[idx]
def _get_default_order(self):
"""
Return default sort order
"""
if self.column_default_sort:
if isinstance(self.column_default_sort, tuple):
return self.column_default_sort
else:
return self.column_default_sort, False
return None
# Database-related API
def get_list(self, page, sort_field, sort_desc, search, filters):
"""
Return a paginated and sorted list of models from the data source.
Must be implemented in the child class.
:param page:
Page number, 0-based. Can be set to None for the first page.
:param sort_field:
Sort column name or None.
:param sort_desc:
If set to True, sorting is in descending order.
:param search:
Search query
:param filters:
List of filter tuples. First value in a tuple is a search
index, second value is a search value.
"""
raise NotImplementedError('Please implement get_list method')
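# Hedged sketch (an illustration, not part of Flask-Admin): the simplest
# possible get_list() for an in-memory backend, showing how the arguments
# documented above fit together. The MemoryModelView name, its `objects`
# attribute and the name-based search are assumptions; real backends
# translate these arguments into database queries and also honour `filters`.
class MemoryModelView(BaseModelView):
    objects = []  # hypothetical data source: a list of model instances

    def get_list(self, page, sort_field, sort_desc, search, filters):
        items = list(self.objects)
        if search:
            items = [o for o in items
                     if search.lower() in str(getattr(o, 'name', '')).lower()]
        if sort_field is not None:
            items.sort(key=lambda o: getattr(o, sort_field),
                       reverse=bool(sort_desc))
        count = len(items)
        start = (page or 0) * self.page_size
        return count, items[start:start + self.page_size]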
def get_one(self, id):
"""
Return one model by its id.
Must be implemented in the child class.
:param id:
Model id
"""
raise NotImplementedError('Please implement get_one method')
# Exception handler
def handle_view_exception(self, exc):
if isinstance(exc, ValidationError):
flash(as_unicode(exc))
return True
if self._debug:
raise
return False
# Model event handlers
def on_model_change(self, form, model, is_created):
"""
Perform some actions after a model is created or updated.
Called from create_model and update_model in the same transaction
(if it has any meaning for a store backend).
By default does nothing.
:param form:
Form used to create/update model
:param model:
Model that will be created/updated
:param is_created:
Will be set to True if model was created and to False if edited
"""
pass
def _on_model_change(self, form, model, is_created):
"""
Compatibility helper.
"""
try:
self.on_model_change(form, model, is_created)
except TypeError:
msg = ('%s.on_model_change() now accepts a third ' +
'parameter, is_created. Please update your code') % self.model
warnings.warn(msg)
self.on_model_change(form, model)
def after_model_change(self, form, model, is_created):
"""
Perform some actions after a model was created or updated and
committed to the database.
Called from create_model after successful database commit.
By default does nothing.
:param form:
Form used to create/update model
:param model:
Model that was created/updated
:param is_created:
True if model was created, False if model was updated
"""
pass
def on_model_delete(self, model):
"""
Perform some actions before a model is deleted.
Called from delete_model in the same transaction
(if it has any meaning for a store backend).
By default does nothing.
"""
pass
def after_model_delete(self, model):
"""
Perform some actions after a model was deleted and
committed to the database.
Called from delete_model after successful database commit
(if it has any meaning for a store backend).
By default does nothing.
:param model:
Model that was deleted
"""
pass
def on_form_prefill(self, form, id):
"""
Perform additional actions to pre-fill the edit form.
Called from edit_view, if the current action is rendering
the form rather than receiving client side input, after
default pre-filling has been performed.
By default does nothing.
You only need to override this if you have added custom
fields that depend on the database contents in a way that
Flask-Admin can't figure out by itself. Fields that were
added by the name of a normal column or relationship should
work out of the box.
:param form:
Form instance
:param id:
id of the object that is going to be edited
"""
pass
def create_model(self, form):
"""
Create model from the form.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
"""
raise NotImplementedError()
def update_model(self, form, model):
"""
Update model from the form.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
:param model:
Model instance
"""
raise NotImplementedError()
def delete_model(self, model):
"""
Delete model.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param model:
Model instance
"""
raise NotImplementedError()
# Various helpers
def _prettify_name(self, name):
"""
Prettify pythonic variable name.
For example, 'hello_world' will be converted to 'Hello World'
:param name:
Name to prettify
"""
return prettify_name(name)
def get_empty_list_message(self):
return gettext('There are no items in the table.')
# URL generation helpers
def _get_list_filter_args(self):
if self._filters:
filters = []
for n in request.args:
if not n.startswith('flt'):
continue
if '_' not in n:
continue
pos, key = n[3:].split('_', 1)
if key in self._filter_args:
idx, flt = self._filter_args[key]
value = request.args[n]
if flt.validate(value):
filters.append((pos, (idx, flt.name, value)))
else:
flash(gettext('Invalid Filter Value: %(value)s', value=value))
# Sort filters
return [v[1] for v in sorted(filters, key=lambda n: n[0])]
return None
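# Illustrative helper (an assumption, not part of Flask-Admin) that mirrors
# the query-string parsing done above: active filters arrive as
# flt<position>_<key>=<value>, e.g. ?flt0_name_equals=Alice. The key
# "name_equals" is a made-up example.
def _split_filter_arg(arg):
    pos, key = arg[3:].split('_', 1)
    return pos, key

assert _split_filter_arg('flt0_name_equals') == ('0', 'name_equals')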
def _get_list_extra_args(self):
"""
Return arguments from query string.
"""
return ViewArgs(page=request.args.get('page', 0, type=int),
sort=request.args.get('sort', None, type=int),
sort_desc=request.args.get('desc', None, type=int),
search=request.args.get('search', None),
filters=self._get_list_filter_args())
# URL generation helpers
def _get_list_url(self, view_args):
"""
Generate a page URL with the current page, sort column and
other parameters.
:param view_args:
ViewArgs object with page number, filters, etc.
"""
page = view_args.page or None
desc = 1 if view_args.sort_desc else None
kwargs = dict(page=page, sort=view_args.sort, desc=desc, search=view_args.search)
kwargs.update(view_args.extra_args)
if view_args.filters:
for i, pair in enumerate(view_args.filters):
idx, flt_name, value = pair
key = 'flt%d_%s' % (i, self.get_filter_arg(idx, self._filters[idx]))
kwargs[key] = value
return self.get_url('.index_view', **kwargs)
# Actions
def is_action_allowed(self, name):
"""
Override this method to allow or disallow actions based
on some condition.
The default implementation only checks if the particular action
is not in `action_disallowed_list`.
"""
return name not in self.action_disallowed_list
def _get_field_value(self, model, name):
"""
Get unformatted field value from the model
"""
return rec_getattr(model, name)
@contextfunction
def get_list_value(self, context, model, name):
"""
Returns the value to be displayed in the list view
:param context:
:py:class:`jinja2.runtime.Context`
:param model:
Model instance
:param name:
Field name
"""
column_fmt = self.column_formatters.get(name)
if column_fmt is not None:
value = column_fmt(self, context, model, name)
else:
value = self._get_field_value(model, name)
choices_map = self._column_choices_map.get(name, {})
if choices_map:
return choices_map.get(value) or value
type_fmt = None
for typeobj, formatter in self.column_type_formatters.items():
if isinstance(value, typeobj):
type_fmt = formatter
break
if type_fmt is not None:
value = type_fmt(self, value)
return value
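# Hedged configuration sketch for the hooks used above (the ProductView name
# and the `price` column are made up): column_formatters takes precedence over
# raw attribute access, then column_type_formatters is applied by value type.
class ProductView(BaseModelView):
    column_formatters = {
        'price': lambda view, context, model, name: '$%.2f' % model.price,
    }
    column_type_formatters = {
        bool: lambda view, value: 'yes' if value else 'no',
    }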
# AJAX references
def _process_ajax_references(self):
"""
Process `form_ajax_refs` and generate model loaders that
will be used by the `ajax_lookup` view.
"""
result = {}
if self.form_ajax_refs:
for name, options in iteritems(self.form_ajax_refs):
if isinstance(options, dict):
result[name] = self._create_ajax_loader(name, options)
elif isinstance(options, AjaxModelLoader):
result[name] = options
else:
raise ValueError('%s.form_ajax_refs can not handle %s types' % (self, type(options)))
return result
def _create_ajax_loader(self, name, options):
"""
Model backend will override this to implement AJAX model loading.
"""
raise NotImplementedError()
# Views
@expose('/')
def index_view(self):
"""
List view
"""
if self.column_editable_list:
form = self.list_form()
else:
form = None
if self.can_delete:
delete_form = self.delete_form()
else:
delete_form = None
# Grab parameters from URL
view_args = self._get_list_extra_args()
# Map column index to column name
sort_column = self._get_column_by_idx(view_args.sort)
if sort_column is not None:
sort_column = sort_column[0]
# Get count and data
count, data = self.get_list(view_args.page, sort_column, view_args.sort_desc,
view_args.search, view_args.filters)
# Calculate number of pages
num_pages = count // self.page_size
if count % self.page_size != 0:
num_pages += 1
# Various URL generation helpers
def pager_url(p):
# Do not add the page number if it is the first page
if p == 0:
p = None
return self._get_list_url(view_args.clone(page=p))
def sort_url(column, invert=False):
desc = None
if invert and not view_args.sort_desc:
desc = 1
return self._get_list_url(view_args.clone(sort=column, sort_desc=desc))
# Actions
actions, actions_confirmation = self.get_actions_list()
clear_search_url = self._get_list_url(view_args.clone(page=0,
sort=view_args.sort,
sort_desc=view_args.sort_desc,
search=None,
filters=None))
return self.render(
self.list_template,
data=data,
form=form,
delete_form=delete_form,
# List
list_columns=self._list_columns,
sortable_columns=self._sortable_columns,
editable_columns=self.column_editable_list,
# Pagination
count=count,
pager_url=pager_url,
num_pages=num_pages,
page=view_args.page,
# Sorting
sort_column=view_args.sort,
sort_desc=view_args.sort_desc,
sort_url=sort_url,
# Search
search_supported=self._search_supported,
clear_search_url=clear_search_url,
search=view_args.search,
# Filters
filters=self._filters,
filter_groups=self._filter_groups,
active_filters=view_args.filters,
# Actions
actions=actions,
actions_confirmation=actions_confirmation,
# Misc
enumerate=enumerate,
get_pk_value=self.get_pk_value,
get_value=self.get_list_value,
return_url=self._get_list_url(view_args),
)
@expose('/new/', methods=('GET', 'POST'))
def create_view(self):
"""
Create model view
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_create:
return redirect(return_url)
form = self.create_form()
if not hasattr(form, '_validated_ruleset') or not form._validated_ruleset:
self._validate_form_instance(ruleset=self._form_create_rules, form=form)
if self.validate_form(form):
if self.create_model(form):
flash(gettext('Record was successfully created.'))
if '_add_another' in request.form:
return redirect(request.url)
else:
return redirect(return_url)
form_opts = FormOpts(widget_args=self.form_widget_args,
form_rules=self._form_create_rules)
return self.render(self.create_template,
form=form,
form_opts=form_opts,
return_url=return_url)
@expose('/edit/', methods=('GET', 'POST'))
def edit_view(self):
"""
Edit model view
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_edit:
return redirect(return_url)
id = get_mdict_item_or_list(request.args, 'id')
if id is None:
return redirect(return_url)
model = self.get_one(id)
if model is None:
return redirect(return_url)
form = self.edit_form(obj=model)
if not hasattr(form, '_validated_ruleset') or not form._validated_ruleset:
self._validate_form_instance(ruleset=self._form_edit_rules, form=form)
if self.validate_form(form):
if self.update_model(form, model):
flash(gettext('Record was successfully saved.'))
if '_continue_editing' in request.form:
return redirect(request.url)
else:
return redirect(return_url)
if request.method == 'GET':
self.on_form_prefill(form, id)
form_opts = FormOpts(widget_args=self.form_widget_args,
form_rules=self._form_edit_rules)
return self.render(self.edit_template,
model=model,
form=form,
form_opts=form_opts,
return_url=return_url)
@expose('/delete/', methods=('POST',))
def delete_view(self):
"""
Delete model view. Only POST method is allowed.
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_delete:
return redirect(return_url)
form = self.delete_form()
if self.validate_form(form):
# id is Required()
id = form.id.data
model = self.get_one(id)
if model is None:
return redirect(return_url)
# message is flashed from within delete_model if it fails
if self.delete_model(model):
flash(gettext('Record was successfully deleted.'))
return redirect(return_url)
else:
flash_errors(form, message='Failed to delete record. %(error)s')
return redirect(return_url)
@expose('/action/', methods=('POST',))
def action_view(self):
"""
Mass-model action view.
"""
return self.handle_action()
@expose('/ajax/lookup/')
def ajax_lookup(self):
name = request.args.get('name')
query = request.args.get('query')
offset = request.args.get('offset', type=int)
limit = request.args.get('limit', 10, type=int)
loader = self._form_ajax_refs.get(name)
if not loader:
abort(404)
data = [loader.format(m) for m in loader.get_list(query, offset, limit)]
return Response(json.dumps(data), mimetype='application/json')
@expose('/ajax/update/', methods=('POST',))
def ajax_update(self):
"""
Edits a single column of a record in list view.
"""
if not self.column_editable_list:
abort(404)
record = None
form = self.list_form()
# prevent validation issues due to submitting a single field
# delete all fields except the field being submitted
for field in form:
# only the submitted field has a positive last_index
if getattr(field, 'last_index', 0):
record = self.get_one(str(field.last_index))
elif field.name == 'csrf_token':
pass
else:
form.__delitem__(field.name)
if record is None:
return gettext('Failed to update record. %(error)s', error=''), 500
if self.validate_form(form):
if self.update_model(form, record):
# Success
return gettext('Record was successfully saved.')
else:
# Error: No records changed, or problem saving to database.
msgs = ", ".join([msg for msg in get_flashed_messages()])
return gettext('Failed to update record. %(error)s',
error=msgs), 500
else:
for field in form:
for error in field.errors:
# return validation error to x-editable
if isinstance(error, list):
return ", ".join(error), 500
else:
return error, 500
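# Hedged usage sketch (not part of the module above): wiring a concrete
# backend view into a Flask application. It assumes the SQLAlchemy backend
# shipped with Flask-Admin (flask_admin.contrib.sqla.ModelView); the User
# model and the `db` session are placeholders that are not defined here.
from flask import Flask
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView


class UserView(ModelView):
    can_delete = True
    column_editable_list = ['name']      # enables the ajax_update endpoint
    action_disallowed_list = ['delete']  # consulted by is_action_allowed()


app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'
admin = Admin(app, name='example')
# admin.add_view(UserView(User, db.session))  # User and db are assumptions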
|
|
#/u/GoldenSights
import praw
import time
import sqlite3
import datetime
import random
USERAGENT = """
/u/GoldenSights T3 data collection: Gathering Submission data for
statistical analysis.
More info at https://github.com/voussoir/reddit/tree/master/T3
"""
r = praw.Reddit(USERAGENT)
print('Connected to reddit.')
sql = sqlite3.connect('D:/T3/t3.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS meta(label TEXT, data TEXT)')
cur.execute(('CREATE TABLE IF NOT EXISTS posts(idint INT, idstr TEXT, '
'created INT, self INT, nsfw INT, author TEXT, title TEXT, '
'url TEXT, selftext TEXT, score INT, subreddit TEXT, distinguish INT, '
'textlen INT, num_comments INT)'))
DISTINGUISHMAP = {0:"user", 1:"moderator", 2:"admin"}
DISTINGUISHMAP_R = {"user":0, "moderator":1, "admin":2}
LOWERBOUND = 9999000
# 5yba0
UPPERBOUND = 164790958
# 2q41im
# 1,679,616 = 10000
# 9,999,000 = 5yba0
# 60,466,176 = 100000
# 120,932,352 = 200000
# 164,790,958 = 2q41im
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
# 13 - num_comments
class Post:
''' Used to map the indices of DB entries to names '''
def __init__(self, data):
self.idint = data[0]
self.idstr = data[1]
self.created_utc = data[2]
self.is_self = True if data[3] == 1 else False
self.over_18 = True if data[4] == 1 else False
self.author = data[5]
self.title = data[6]
self.url = data[7]
self.selftext = data[8]
self.score = data[9]
self.subreddit = data[10]
self.distinguished = DISTINGUISHMAP[data[11]]
self.distinguished_int = data[11]
self.textlen = data[12]
self.num_comments = data[13]
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
def human(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
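# Sanity checks for the helpers above; the expected values come from the
# id-bound comments next to LOWERBOUND/UPPERBOUND (9,999,000 == '5yba0').
assert b36('5yba0') == 9999000
assert b36(9999000) == '5yba0'
assert human(0) == 'Jan 01 1970 00:00:00 UTC'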
def process(itemid, log=True, kill=True, updates=False):
if isinstance(itemid, str):
itemid = [itemid]
if isinstance(itemid, list):
if isinstance(itemid[0], str):
itemid = verify_t3(itemid)
try:
if not updates:
itemid = remove_existing(itemid)
temp = itemid[:]
except Exception:
return
itemid = r.get_info(thing_id=itemid)
try:
len(itemid)
except TypeError:
# get_info returns a non-sized value (None) when no ids could be fetched
print(temp, "DEAD")
if kill:
for item in temp:
logdead(item)
return
for index in range(len(itemid)):
item = itemid[index]
item.idint = b36(item.id)
item.idstr = item.id
if item.distinguished is None:
item.distinguished = 0
else:
item.distinguished = DISTINGUISHMAP_R[item.distinguished]
item.url = "self" if item.is_self else item.url
item.created_utc = int(item.created_utc)
item.is_self = 1 if item.is_self else 0
item.over_18 = 1 if item.over_18 else 0
item.sub = item.subreddit.display_name
item.textlen = len(item.selftext)
try:
item.auth = item.author.name
except AttributeError:
item.auth = "[deleted]"
item = [item.idint, item.idstr, item.created_utc,
item.is_self, item.over_18, item.auth, item.title,
item.url, item.selftext, item.score, item.sub,
item.distinguished, item.textlen, item.num_comments]
itemid[index] = item
if log:
logdb(itemid)
else:
return itemid
if len(itemid) < len(temp):
process(temp)
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
# 13 - num_comments
def logdb(items):
for item in items:
cur.execute('SELECT * FROM posts WHERE idint=?', [item[0]])
if cur.fetchone():
cur.execute('DELETE FROM posts WHERE idint=?', [item[0]])
cur.execute('INSERT INTO posts VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', item)
sql.commit()
def logdead(i):
#If an ID is dead, let's at least add it to the db.
i = i.replace('t3_', '')
data = [b36(i), i, 0, 0, 0, '?', '?', '?', '?', 0, '?', 0, 0, -1]
logdb([data])
def verify_t3(items):
for index in range(len(items)):
i = items[index]
if 't3_' not in i:
items[index] = 't3_' + i
return items
def remove_existing(items):
done = False
items = verify_t3(items)
while not done:
done = True
for item in items:
cur.execute('SELECT * FROM posts WHERE idint=?', [b36(item[3:])])
f = cur.fetchone()
if f:
items.remove(item)
done = False
break
if len(items) == 0:
raise Exception("Nothing new")
return items
def processrange(lower, upper, kill=True, updates=False):
if isinstance(lower, str):
lower = b36(lower)
if isinstance(upper, int):
upper = lower + upper
if isinstance(upper, str):
upper = b36(upper)
if upper <= lower:
print("Upper must be higher than lower")
return
ids = [b36(x) for x in range(lower, upper)]
processchunks(ids, kill, updates)
def processchunks(ids, kill=True, updates=False):
while len(ids) > 0:
p = ids[:100]
print("%s >>> %s (%d)" % (p[0], p[-1], len(ids)))
ids = ids[100:]
process(p, kill=kill, updates=updates)
def lastitem():
cur.execute('SELECT * FROM posts ORDER BY idint DESC LIMIT 1')
return cur.fetchone()[1]
def show():
filea = open('show/missing.txt', 'w')
fileb = open('show/stats.txt', 'w')
totalcount = 0
totaltitle = 0
totalselftext = 0
totalscore = 0
deadcount = 0
selfcount = 0
nsfwcount = 0
distinguishcount_m = 0
distinguishcount_a = 0
commentcount = 0
subredditcounts = {}
dead = []
cur.execute('SELECT * FROM posts')
post = cur.fetchone()
while post:
post = Post(post)
totalcount += 1
if post.created_utc == 0:
deadcount += 1
dead.append(post.idstr)
post = cur.fetchone()
continue
if post.is_self:
selfcount += 1
totalselftext += post.textlen
if post.over_18:
nsfwcount += 1
if post.distinguished_int == 1:
distinguishcount_m += 1
elif post.distinguished_int == 2:
distinguishcount_a += 1
totalscore += post.score
totaltitle += len(post.title)
if post.num_comments > 0:
commentcount += 1
try:
subredditcounts[post.subreddit] += 1
except KeyError:
subredditcounts[post.subreddit] = 1
post = cur.fetchone()
for deaditem in dead:
print(deaditem, file=filea)
filea.close()
currenttime = datetime.datetime.now()
currenttime = datetime.datetime.strftime(currenttime, "%B %d %Y %H:%M:%S")
currenttimes = "Updated %s\n" % currenttime
counts = '{0:,}'.format(totalcount)
mainstats = '%s posts collected; ' % counts
mainstats += '%s dead.\n' % '{0:,}'.format(deadcount)
linkcount = (totalcount - deadcount) - selfcount
selfs = '{0:,}'.format(selfcount)
links = '{0:,}'.format(linkcount)
selfstats = '%s linkposts; %s selfposts\n' % (links, selfs)
readmefile = open('README.md', 'r')
readmelines = readmefile.readlines()
readmefile.close()
readmelines[3] = currenttimes
readmelines[4] = mainstats
readmelines[5] = selfstats
readmefile = open('README.md', 'w')
readmefile.write(''.join(readmelines))
readmefile.close()
subkeys = list(subredditcounts.keys())
subkeys.sort(key=subredditcounts.get, reverse=True)
print('Total: %s' % '{0:,}'.format(totalcount), file=fileb)
print('Dead: %s' % '{0:,}'.format(deadcount), file=fileb)
print('Self: %s' % '{0:,}'.format(selfcount), file=fileb)
print('Link: %s' % '{0:,}'.format(linkcount), file=fileb)
print('NSFW: %s' % '{0:,}'.format(nsfwcount), file=fileb)
print('Distinguished by mods: %s' % '{0:,}'.format(distinguishcount_m), file=fileb)
print('Distinguished by admins: %s' % '{0:,}'.format(distinguishcount_a), file=fileb)
print('Total upvotes: %s' % '{0:,}'.format(totalscore), file=fileb)
print('Total characters in titles: %s' % '{0:,}'.format(totaltitle), file=fileb)
print('Total characters in selftext: %s' % '{0:,}'.format(totalselftext), file=fileb)
print('Total (supposed) comments on posts: %s' % '{0:,}'.format(commentcount), file=fileb)
print('\n\n', file=fileb)
for key in subkeys:
out = key
out += '.'*(25-len(key))
num = '{0:,}'.format(subredditcounts[key])
out += '.'*(14-len(num))
out += num
print(out, file=fileb)
fileb.close()
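# Illustrative driver (an assumption, not in the original script): resume
# collection from the newest stored id, fetch the next `batch_size` ids in
# chunks of 100, then regenerate the stats files. Requires reddit access and
# the sqlite database configured above.
def collect_next_batch(batch_size=10000):
    start = b36(lastitem())
    processrange(start, batch_size)
    show()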
|
|
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType
from .validators import boolean, double, integer
from .validators.autoscaling import EC2_INSTANCE_LAUNCH # noqa: F401
from .validators.autoscaling import EC2_INSTANCE_LAUNCH_ERROR # noqa: F401
from .validators.autoscaling import EC2_INSTANCE_TERMINATE # noqa: F401
from .validators.autoscaling import EC2_INSTANCE_TERMINATE_ERROR # noqa: F401
from .validators.autoscaling import TEST_NOTIFICATION # noqa: F401
from .validators.autoscaling import AllocationStrategy # noqa: F401
from .validators.autoscaling import ClosestToNextInstanceHour # noqa: F401
from .validators.autoscaling import Default # noqa: F401
from .validators.autoscaling import Metadata # noqa: F401
from .validators.autoscaling import NewestInstance # noqa: F401
from .validators.autoscaling import OldestInstance # noqa: F401
from .validators.autoscaling import OldestLaunchConfiguration # noqa: F401
from .validators.autoscaling import OldestLaunchTemplate # noqa: F401
from .validators.autoscaling import Tag # noqa: F401
from .validators.autoscaling import Tags # noqa: F401
from .validators.autoscaling import (
validate_auto_scaling_group,
validate_int_to_str,
validate_launch_template_specification,
validate_tags_or_list,
)
class LaunchTemplateSpecification(AWSProperty):
"""
`LaunchTemplateSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html>`__
"""
props: PropsDictType = {
"LaunchTemplateId": (str, False),
"LaunchTemplateName": (str, False),
"Version": (str, True),
}
def validate(self):
validate_launch_template_specification(self)
class LifecycleHookSpecification(AWSProperty):
"""
`LifecycleHookSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html>`__
"""
props: PropsDictType = {
"DefaultResult": (str, False),
"HeartbeatTimeout": (integer, False),
"LifecycleHookName": (str, True),
"LifecycleTransition": (str, True),
"NotificationMetadata": (str, False),
"NotificationTargetARN": (str, False),
"RoleARN": (str, False),
}
class MetricsCollection(AWSProperty):
"""
`MetricsCollection <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-metricscollection.html>`__
"""
props: PropsDictType = {
"Granularity": (str, True),
"Metrics": ([str], False),
}
class InstancesDistribution(AWSProperty):
"""
`InstancesDistribution <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html>`__
"""
props: PropsDictType = {
"OnDemandAllocationStrategy": (str, False),
"OnDemandBaseCapacity": (integer, False),
"OnDemandPercentageAboveBaseCapacity": (integer, False),
"SpotAllocationStrategy": (str, False),
"SpotInstancePools": (integer, False),
"SpotMaxPrice": (str, False),
}
class AcceleratorCountRequest(AWSProperty):
"""
`AcceleratorCountRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-acceleratorcountrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class AcceleratorTotalMemoryMiBRequest(AWSProperty):
"""
`AcceleratorTotalMemoryMiBRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-acceleratortotalmemorymibrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class BaselineEbsBandwidthMbpsRequest(AWSProperty):
"""
`BaselineEbsBandwidthMbpsRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-baselineebsbandwidthmbpsrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class MemoryGiBPerVCpuRequest(AWSProperty):
"""
`MemoryGiBPerVCpuRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-memorygibpervcpurequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class MemoryMiBRequest(AWSProperty):
"""
`MemoryMiBRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-memorymibrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class NetworkInterfaceCountRequest(AWSProperty):
"""
`NetworkInterfaceCountRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-networkinterfacecountrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class TotalLocalStorageGBRequest(AWSProperty):
"""
`TotalLocalStorageGBRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-totallocalstoragegbrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class VCpuCountRequest(AWSProperty):
"""
`VCpuCountRequest <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-vcpucountrequest.html>`__
"""
props: PropsDictType = {
"Max": (integer, False),
"Min": (integer, False),
}
class InstanceRequirements(AWSProperty):
"""
`InstanceRequirements <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancerequirements.html>`__
"""
props: PropsDictType = {
"AcceleratorCount": (AcceleratorCountRequest, False),
"AcceleratorManufacturers": ([str], False),
"AcceleratorNames": ([str], False),
"AcceleratorTotalMemoryMiB": (AcceleratorTotalMemoryMiBRequest, False),
"AcceleratorTypes": ([str], False),
"BareMetal": (str, False),
"BaselineEbsBandwidthMbps": (BaselineEbsBandwidthMbpsRequest, False),
"BurstablePerformance": (str, False),
"CpuManufacturers": ([str], False),
"ExcludedInstanceTypes": ([str], False),
"InstanceGenerations": ([str], False),
"LocalStorage": (str, False),
"LocalStorageTypes": ([str], False),
"MemoryGiBPerVCpu": (MemoryGiBPerVCpuRequest, False),
"MemoryMiB": (MemoryMiBRequest, False),
"NetworkInterfaceCount": (NetworkInterfaceCountRequest, False),
"OnDemandMaxPricePercentageOverLowestPrice": (integer, False),
"RequireHibernateSupport": (boolean, False),
"SpotMaxPricePercentageOverLowestPrice": (integer, False),
"TotalLocalStorageGB": (TotalLocalStorageGBRequest, False),
"VCpuCount": (VCpuCountRequest, False),
}
class LaunchTemplateOverrides(AWSProperty):
"""
`LaunchTemplateOverrides <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html>`__
"""
props: PropsDictType = {
"InstanceRequirements": (InstanceRequirements, False),
"InstanceType": (str, False),
"LaunchTemplateSpecification": (LaunchTemplateSpecification, False),
"WeightedCapacity": (str, False),
}
class LaunchTemplate(AWSProperty):
"""
`LaunchTemplate <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html>`__
"""
props: PropsDictType = {
"LaunchTemplateSpecification": (LaunchTemplateSpecification, True),
"Overrides": ([LaunchTemplateOverrides], False),
}
class MixedInstancesPolicy(AWSProperty):
"""
`MixedInstancesPolicy <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-group-mixedinstancespolicy.html>`__
"""
props: PropsDictType = {
"InstancesDistribution": (InstancesDistribution, False),
"LaunchTemplate": (LaunchTemplate, True),
}
class NotificationConfigurations(AWSProperty):
"""
`NotificationConfigurations <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-notificationconfigurations.html>`__
"""
props: PropsDictType = {
"NotificationTypes": ([str], False),
"TopicARN": (str, True),
}
class AutoScalingGroup(AWSObject):
"""
`AutoScalingGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html>`__
"""
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props: PropsDictType = {
"AutoScalingGroupName": (str, False),
"AvailabilityZones": ([str], False),
"CapacityRebalance": (boolean, False),
"Context": (str, False),
"Cooldown": (str, False),
"DesiredCapacity": (str, False),
"DesiredCapacityType": (str, False),
"HealthCheckGracePeriod": (integer, False),
"HealthCheckType": (str, False),
"InstanceId": (str, False),
"LaunchConfigurationName": (str, False),
"LaunchTemplate": (LaunchTemplateSpecification, False),
"LifecycleHookSpecificationList": ([LifecycleHookSpecification], False),
"LoadBalancerNames": ([str], False),
"MaxInstanceLifetime": (integer, False),
"MaxSize": (validate_int_to_str, True),
"MetricsCollection": ([MetricsCollection], False),
"MinSize": (validate_int_to_str, True),
"MixedInstancesPolicy": (MixedInstancesPolicy, False),
"NewInstancesProtectedFromScaleIn": (boolean, False),
"NotificationConfigurations": ([NotificationConfigurations], False),
"PlacementGroup": (str, False),
"ServiceLinkedRoleARN": (str, False),
"Tags": (validate_tags_or_list, False),
"TargetGroupARNs": ([str], False),
"TerminationPolicies": ([str], False),
"VPCZoneIdentifier": ([str], False),
}
def validate(self):
validate_auto_scaling_group(self)
class EBSBlockDevice(AWSProperty):
"""
`EBSBlockDevice <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-launchconfiguration-blockdevice.html>`__
"""
props: PropsDictType = {
"DeleteOnTermination": (boolean, False),
"Encrypted": (boolean, False),
"Iops": (integer, False),
"SnapshotId": (str, False),
"Throughput": (integer, False),
"VolumeSize": (integer, False),
"VolumeType": (str, False),
}
class BlockDeviceMapping(AWSProperty):
"""
`BlockDeviceMapping <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-launchconfiguration-blockdevicemapping.html>`__
"""
props: PropsDictType = {
"DeviceName": (str, True),
"Ebs": (EBSBlockDevice, False),
"NoDevice": (boolean, False),
"VirtualName": (str, False),
}
class MetadataOptions(AWSProperty):
"""
`MetadataOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-launchconfiguration-metadataoptions.html>`__
"""
props: PropsDictType = {
"HttpEndpoint": (str, False),
"HttpPutResponseHopLimit": (integer, False),
"HttpTokens": (str, False),
}
class LaunchConfiguration(AWSObject):
"""
`LaunchConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-launchconfiguration.html>`__
"""
resource_type = "AWS::AutoScaling::LaunchConfiguration"
props: PropsDictType = {
"AssociatePublicIpAddress": (boolean, False),
"BlockDeviceMappings": ([BlockDeviceMapping], False),
"ClassicLinkVPCId": (str, False),
"ClassicLinkVPCSecurityGroups": ([str], False),
"EbsOptimized": (boolean, False),
"IamInstanceProfile": (str, False),
"ImageId": (str, True),
"InstanceId": (str, False),
"InstanceMonitoring": (boolean, False),
"InstanceType": (str, True),
"KernelId": (str, False),
"KeyName": (str, False),
"LaunchConfigurationName": (str, False),
"MetadataOptions": (MetadataOptions, False),
"PlacementTenancy": (str, False),
"RamDiskId": (str, False),
"SecurityGroups": ([str], False),
"SpotPrice": (str, False),
"UserData": (str, False),
}
class LifecycleHook(AWSObject):
"""
`LifecycleHook <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-lifecyclehook.html>`__
"""
resource_type = "AWS::AutoScaling::LifecycleHook"
props: PropsDictType = {
"AutoScalingGroupName": (str, True),
"DefaultResult": (str, False),
"HeartbeatTimeout": (integer, False),
"LifecycleHookName": (str, False),
"LifecycleTransition": (str, True),
"NotificationMetadata": (str, False),
"NotificationTargetARN": (str, False),
"RoleARN": (str, False),
}
class PredictiveScalingPredefinedLoadMetric(AWSProperty):
"""
`PredictiveScalingPredefinedLoadMetric <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predictivescalingpredefinedloadmetric.html>`__
"""
props: PropsDictType = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class PredictiveScalingPredefinedMetricPair(AWSProperty):
"""
`PredictiveScalingPredefinedMetricPair <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predictivescalingpredefinedmetricpair.html>`__
"""
props: PropsDictType = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class PredictiveScalingPredefinedScalingMetric(AWSProperty):
"""
`PredictiveScalingPredefinedScalingMetric <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predictivescalingpredefinedscalingmetric.html>`__
"""
props: PropsDictType = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class PredictiveScalingMetricSpecification(AWSProperty):
"""
`PredictiveScalingMetricSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predictivescalingmetricspecification.html>`__
"""
props: PropsDictType = {
"PredefinedLoadMetricSpecification": (
PredictiveScalingPredefinedLoadMetric,
False,
),
"PredefinedMetricPairSpecification": (
PredictiveScalingPredefinedMetricPair,
False,
),
"PredefinedScalingMetricSpecification": (
PredictiveScalingPredefinedScalingMetric,
False,
),
"TargetValue": (double, True),
}
class PredictiveScalingConfiguration(AWSProperty):
"""
`PredictiveScalingConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predictivescalingconfiguration.html>`__
"""
props: PropsDictType = {
"MaxCapacityBreachBehavior": (str, False),
"MaxCapacityBuffer": (integer, False),
"MetricSpecifications": ([PredictiveScalingMetricSpecification], True),
"Mode": (str, False),
"SchedulingBufferTime": (integer, False),
}
class StepAdjustments(AWSProperty):
"""
`StepAdjustments <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html>`__
"""
props: PropsDictType = {
"MetricIntervalLowerBound": (double, False),
"MetricIntervalUpperBound": (double, False),
"ScalingAdjustment": (integer, True),
}
class MetricDimension(AWSProperty):
"""
`MetricDimension <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
"Value": (str, True),
}
class CustomizedMetricSpecification(AWSProperty):
"""
`CustomizedMetricSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html>`__
"""
props: PropsDictType = {
"Dimensions": ([MetricDimension], False),
"MetricName": (str, True),
"Namespace": (str, True),
"Statistic": (str, True),
"Unit": (str, False),
}
class PredefinedMetricSpecification(AWSProperty):
"""
`PredefinedMetricSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html>`__
"""
props: PropsDictType = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class TargetTrackingConfiguration(AWSProperty):
"""
`TargetTrackingConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html>`__
"""
props: PropsDictType = {
"CustomizedMetricSpecification": (CustomizedMetricSpecification, False),
"DisableScaleIn": (boolean, False),
"PredefinedMetricSpecification": (PredefinedMetricSpecification, False),
"TargetValue": (double, True),
}
class ScalingPolicy(AWSObject):
"""
`ScalingPolicy <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html>`__
"""
resource_type = "AWS::AutoScaling::ScalingPolicy"
props: PropsDictType = {
"AdjustmentType": (str, False),
"AutoScalingGroupName": (str, True),
"Cooldown": (str, False),
"EstimatedInstanceWarmup": (integer, False),
"MetricAggregationType": (str, False),
"MinAdjustmentMagnitude": (integer, False),
"PolicyType": (str, False),
"PredictiveScalingConfiguration": (PredictiveScalingConfiguration, False),
"ScalingAdjustment": (integer, False),
"StepAdjustments": ([StepAdjustments], False),
"TargetTrackingConfiguration": (TargetTrackingConfiguration, False),
}
class ScheduledAction(AWSObject):
"""
`ScheduledAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html>`__
"""
resource_type = "AWS::AutoScaling::ScheduledAction"
props: PropsDictType = {
"AutoScalingGroupName": (str, True),
"DesiredCapacity": (integer, False),
"EndTime": (str, False),
"MaxSize": (integer, False),
"MinSize": (integer, False),
"Recurrence": (str, False),
"StartTime": (str, False),
"TimeZone": (str, False),
}
class InstanceReusePolicy(AWSProperty):
"""
`InstanceReusePolicy <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-warmpool-instancereusepolicy.html>`__
"""
props: PropsDictType = {
"ReuseOnScaleIn": (boolean, False),
}
class WarmPool(AWSObject):
"""
`WarmPool <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-warmpool.html>`__
"""
resource_type = "AWS::AutoScaling::WarmPool"
props: PropsDictType = {
"AutoScalingGroupName": (str, True),
"InstanceReusePolicy": (InstanceReusePolicy, False),
"MaxGroupPreparedCapacity": (integer, False),
"MinSize": (integer, False),
"PoolState": (str, False),
}
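# Hedged usage sketch (not part of this autogenerated module): build a minimal
# CloudFormation template containing an AutoScalingGroup that points at an
# existing launch template. The launch template id, version, sizes and
# availability zone names are placeholders.
def _example_autoscaling_template():
    from troposphere import Template

    t = Template()
    t.add_resource(AutoScalingGroup(
        "WebAsg",
        MinSize=1,                       # validate_int_to_str accepts int or str
        MaxSize=3,
        LaunchTemplate=LaunchTemplateSpecification(
            LaunchTemplateId="lt-0123456789abcdef0",
            Version="1",
        ),
        AvailabilityZones=["us-east-1a", "us-east-1b"],
    ))
    return t.to_json()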
|
|
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
import base64
import docker
import gevent
import os
import re
import socket
from netaddr import IPAddress, IPNetwork
class DeviceZtpManager(object):
EXCHANGE = 'device_ztp_exchange'
CONFIG_FILE_ROUTING_KEY = 'device_ztp.config.file'
TFTP_FILE_ROUTING_KEY = 'device_ztp.tftp.file'
ZTP_REQUEST_ROUTING_KEY = 'device_ztp.request'
ZTP_RESPONSE_ROUTING_KEY = 'device_ztp.response.'
MESSAGE_TTL = 5*60
_instance = None
def __init__(self, amqp_client, args, logger):
DeviceZtpManager._instance = self
self._client = None
self._active = False
self._amqp_client = amqp_client
self._dnsmasq_conf_dir = args.dnsmasq_conf_dir
self._tftp_dir = args.tftp_dir
self._dhcp_leases_file = args.dhcp_leases_file
self._timeout = args.ztp_timeout
self._logger = logger
self._lease_pattern = None
self._initialized = False
self._initialize()
# end __init__
@classmethod
def get_instance(cls):
return cls._instance
# end get_instance
@classmethod
def destroy_instance(cls):
inst = cls.get_instance()
if not inst:
return
cls._instance = None
# end destroy_instance
def _initialize(self):
if not self._dnsmasq_conf_dir or not self._tftp_dir or\
not self._dhcp_leases_file:
return
self._initialized = True
self._amqp_client.add_exchange(self.EXCHANGE)
consumer = 'device_manager_ztp.%s.config_queue' % \
socket.getfqdn()
self._amqp_client.add_consumer(consumer, self.EXCHANGE,
routing_key=self.CONFIG_FILE_ROUTING_KEY,
callback=self.handle_config_file_request)
consumer = 'device_manager_ztp.%s.tftp_queue' % \
socket.getfqdn()
self._amqp_client.add_consumer(consumer, self.EXCHANGE,
routing_key=self.TFTP_FILE_ROUTING_KEY,
callback=self.handle_tftp_file_request)
# end _initialize
def set_active(self):
if not self._initialized:
return
self._active = True
self._lease_pattern = re.compile(
r"[0-9]+ ([:A-Fa-f0-9]+) ([0-9.]+) .*",
re.MULTILINE | re.DOTALL)
consumer = 'device_manager_ztp.ztp_queue'
self._amqp_client.add_consumer(consumer, self.EXCHANGE,
routing_key=self.ZTP_REQUEST_ROUTING_KEY,
callback=self.handle_ztp_request)
# end set_active
def handle_config_file_request(self, body, message):
self._handle_file_request(body, message, self._dnsmasq_conf_dir)
# end handle_config_file_request
def handle_tftp_file_request(self, body, message):
self._handle_file_request(body, message, self._tftp_dir)
# end handle_tftp_file_request
def handle_ztp_request(self, body, message):
message.ack()
gevent.spawn(self._ztp_request, message.headers, body)
# end handle_ztp_request
def _ztp_request(self, headers, config):
try:
action = headers.get('action')
if action is None:
return
timeout = self._timeout
file_name = headers.get('file_name')
file_path = os.path.join(self._dnsmasq_conf_dir, file_name)
if action == 'create':
self._logger.info("Waiting for file %s to be created" % file_path)
while timeout > 0 and not os.path.isfile(file_path):
timeout -= 1
gevent.sleep(1)
self._restart_dnsmasq_container()
self._read_dhcp_leases(headers.get('fabric_name'), config)
elif action == 'delete':
self._logger.info("Waiting for file %s to be deleted" % file_path)
while timeout > 0 and os.path.isfile(file_path):
timeout -= 1
gevent.sleep(1)
self._restart_dnsmasq_container()
except Exception as e:
self._logger.error("Error while handling ztp request %s" % repr(e))
# end _ztp_request
def _read_dhcp_leases(self, fabric_name, config):
results = {}
results['failed'] = True
device_count = config.get('device_count', 0)
timeout = self._timeout
self._logger.info("Waiting for %s devices" % device_count)
while timeout > 0:
timeout -= 1
results['device_list'] = []
lease_table = {}
if os.path.isfile(self._dhcp_leases_file):
with open(self._dhcp_leases_file) as lfile:
for match in self._lease_pattern.finditer(lfile.read()):
mac = match.group(1)
ip_addr = match.group(2)
lease_table[mac] = ip_addr
for mac, ip_addr in lease_table.iteritems():
if self._within_dhcp_subnet(ip_addr, config):
results['device_list'].append({"ip_addr": ip_addr, "mac": mac})
if len(results['device_list']) >= device_count:
results['failed'] = False
break
gevent.sleep(1)
results['msg'] = "Found {} devices, expected {} devices".\
format(len(results['device_list']), device_count)
self._logger.info(results['msg'])
self._amqp_client.publish(results, self.EXCHANGE,
routing_key=self.ZTP_RESPONSE_ROUTING_KEY + fabric_name)
# end _read_dhcp_leases
def _handle_file_request(self, body, message, dir):
try:
message.ack()
action = message.headers.get('action')
if action is None:
return
file_name = message.headers.get('file_name')
if file_name is None or len(file_name) == 0:
return
action = str(action).lower()
file_path = os.path.join(dir, file_name)
if action == 'create':
self._logger.info("Creating file %s" % file_path)
with open(file_path, 'w') as f:
f.write(bytearray(base64.b64decode(body)))
elif action == 'delete':
self._logger.info("Deleting file %s" % file_path)
os.remove(file_path)
except Exception as e:
self._logger.error("ZTP manager: Error handling file request %s" %
repr(e))
# end _handle_file_request
def _restart_dnsmasq_container(self):
if self._client is None:
self._client = docker.from_env()
self._logger.debug("Fetching all containers")
all_containers = self._client.containers(all=True)
for container in all_containers:
labels = container.get('Labels', dict())
service = labels.get('net.juniper.contrail.service')
if service == 'dnsmasq':
self._logger.info("Restarting dnsmasq docker: %s" %
str(container))
self._client.restart(container)
# end _restart_dnsmasq_container
@staticmethod
def _within_dhcp_subnet(ip_addr, config):
subnets = config.get('ipam_subnets', [])
for subnet_obj in subnets:
subnet = subnet_obj.get('subnet', {})
ip_prefix = subnet.get('ip_prefix')
length = subnet.get('ip_prefix_len')
if IPAddress(ip_addr) in IPNetwork("{}/{}".format(ip_prefix, length)):
return True
return False
# end _within_dhcp_subnet
# end DeviceZtpManager
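# Hedged illustration (not part of the original module) of the config shape
# consumed by _within_dhcp_subnet() above; the prefixes and addresses are
# placeholders.
def _example_subnet_check():
    config = {
        'ipam_subnets': [
            {'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
        ],
    }
    assert DeviceZtpManager._within_dhcp_subnet('10.1.1.42', config) is True
    assert DeviceZtpManager._within_dhcp_subnet('192.168.0.5', config) is False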
|
|
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_
from sqlalchemy.ext import declarative as decl
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import relationship, create_session, class_mapper, \
configure_mappers, clear_mappers, \
deferred, column_property, \
Session
from sqlalchemy.util import classproperty
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.testing import fixtures
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
def setup(self):
global Base
Base = decl.declarative_base(testing.db)
def teardown(self):
Session.close_all()
clear_mappers()
Base.metadata.drop_all()
class DeclarativeMixinTest(DeclarativeTestBase):
def test_simple(self):
class MyMixin(object):
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
def foo(self):
return 'bar' + str(self.id)
class MyModel(Base, MyMixin):
__tablename__ = 'test'
name = Column(String(100), nullable=False, index=True)
Base.metadata.create_all()
session = create_session()
session.add(MyModel(name='testing'))
session.flush()
session.expunge_all()
obj = session.query(MyModel).one()
eq_(obj.id, 1)
eq_(obj.name, 'testing')
eq_(obj.foo(), 'bar1')
def test_unique_column(self):
class MyMixin(object):
id = Column(Integer, primary_key=True)
value = Column(String, unique=True)
class MyModel(Base, MyMixin):
__tablename__ = 'test'
assert MyModel.__table__.c.value.unique
def test_hierarchical_bases(self):
class MyMixinParent:
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
def foo(self):
return 'bar' + str(self.id)
class MyMixin(MyMixinParent):
baz = Column(String(100), nullable=False, index=True)
class MyModel(Base, MyMixin):
__tablename__ = 'test'
name = Column(String(100), nullable=False, index=True)
Base.metadata.create_all()
session = create_session()
session.add(MyModel(name='testing', baz='fu'))
session.flush()
session.expunge_all()
obj = session.query(MyModel).one()
eq_(obj.id, 1)
eq_(obj.name, 'testing')
eq_(obj.foo(), 'bar1')
eq_(obj.baz, 'fu')
def test_mixin_overrides(self):
"""test a mixin that overrides a column on a superclass."""
class MixinA(object):
foo = Column(String(50))
class MixinB(MixinA):
foo = Column(Integer)
class MyModelA(Base, MixinA):
__tablename__ = 'testa'
id = Column(Integer, primary_key=True)
class MyModelB(Base, MixinB):
__tablename__ = 'testb'
id = Column(Integer, primary_key=True)
eq_(MyModelA.__table__.c.foo.type.__class__, String)
eq_(MyModelB.__table__.c.foo.type.__class__, Integer)
def test_not_allowed(self):
class MyMixin:
foo = Column(Integer, ForeignKey('bar.id'))
def go():
class MyModel(Base, MyMixin):
__tablename__ = 'foo'
assert_raises(sa.exc.InvalidRequestError, go)
class MyRelMixin:
foo = relationship('Bar')
def go():
class MyModel(Base, MyRelMixin):
__tablename__ = 'foo'
assert_raises(sa.exc.InvalidRequestError, go)
class MyDefMixin:
foo = deferred(Column('foo', String))
def go():
class MyModel(Base, MyDefMixin):
__tablename__ = 'foo'
assert_raises(sa.exc.InvalidRequestError, go)
class MyCPropMixin:
foo = column_property(Column('foo', String))
def go():
class MyModel(Base, MyCPropMixin):
__tablename__ = 'foo'
assert_raises(sa.exc.InvalidRequestError, go)
def test_table_name_inherited(self):
class MyMixin:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyModel(Base, MyMixin):
pass
eq_(MyModel.__table__.name, 'mymodel')
def test_classproperty_still_works(self):
class MyMixin(object):
@classproperty
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyModel(Base, MyMixin):
__tablename__ = 'overridden'
eq_(MyModel.__table__.name, 'overridden')
def test_table_name_not_inherited(self):
class MyMixin:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyModel(Base, MyMixin):
__tablename__ = 'overridden'
eq_(MyModel.__table__.name, 'overridden')
def test_table_name_inheritance_order(self):
class MyMixin1:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower() + '1'
class MyMixin2:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower() + '2'
class MyModel(Base, MyMixin1, MyMixin2):
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.name, 'mymodel1')
def test_table_name_dependent_on_subclass(self):
class MyHistoryMixin:
@declared_attr
def __tablename__(cls):
return cls.parent_name + '_changelog'
class MyModel(Base, MyHistoryMixin):
parent_name = 'foo'
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.name, 'foo_changelog')
def test_table_args_inherited(self):
class MyMixin:
__table_args__ = {'mysql_engine': 'InnoDB'}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'})
def test_table_args_inherited_descriptor(self):
class MyMixin:
@declared_attr
def __table_args__(cls):
return {'info': cls.__name__}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.info, 'MyModel')
def test_table_args_inherited_single_table_inheritance(self):
class MyMixin:
__table_args__ = {'mysql_engine': 'InnoDB'}
class General(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
type_ = Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class Specific(General):
__mapper_args__ = {'polymorphic_identity': 'specific'}
assert Specific.__table__ is General.__table__
eq_(General.__table__.kwargs, {'mysql_engine': 'InnoDB'})
def test_columns_single_table_inheritance(self):
"""Test a column on a mixin with an alternate attribute name,
mapped to a superclass and single-table inheritance subclass.
The superclass table gets the column, the subclass shares
the MapperProperty.
"""
class MyMixin(object):
foo = Column('foo', Integer)
bar = Column('bar_newname', Integer)
class General(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
type_ = Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class Specific(General):
__mapper_args__ = {'polymorphic_identity': 'specific'}
assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
assert len(General.bar.prop.columns) == 1
assert Specific.bar.prop is General.bar.prop
@testing.skip_if(lambda: testing.against('oracle'),
"Test has an empty insert in it at the moment")
def test_columns_single_inheritance_conflict_resolution(self):
"""Test that a declared_attr can return the existing column and it will
be ignored. this allows conditional columns to be added.
See [ticket:2472].
"""
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
class Mixin(object):
@declared_attr
def target_id(cls):
return cls.__table__.c.get(
'target_id',
Column(Integer, ForeignKey('other.id'))
)
@declared_attr
def target(cls):
return relationship("Other")
class Engineer(Mixin, Person):
"""single table inheritance"""
class Manager(Mixin, Person):
"""single table inheritance"""
class Other(Base):
__tablename__ = 'other'
id = Column(Integer, primary_key=True)
is_(
Engineer.target_id.property.columns[0],
Person.__table__.c.target_id
)
is_(
Manager.target_id.property.columns[0],
Person.__table__.c.target_id
)
# do a brief round trip on this
Base.metadata.create_all()
session = Session()
o1, o2 = Other(), Other()
session.add_all([
Engineer(target=o1),
Manager(target=o2),
Manager(target=o1)
])
session.commit()
eq_(session.query(Engineer).first().target, o1)
def test_columns_joined_table_inheritance(self):
"""Test a column on a mixin with an alternate attribute name,
mapped to a superclass and joined-table inheritance subclass.
Both tables get the column, in the case of the subclass the two
columns are joined under one MapperProperty.
"""
class MyMixin(object):
foo = Column('foo', Integer)
bar = Column('bar_newname', Integer)
class General(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
type_ = Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class Specific(General):
__tablename__ = 'sub'
id = Column(Integer, ForeignKey('test.id'), primary_key=True)
__mapper_args__ = {'polymorphic_identity': 'specific'}
assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
assert len(General.bar.prop.columns) == 1
assert Specific.bar.prop is General.bar.prop
eq_(len(Specific.bar.prop.columns), 1)
assert Specific.bar.prop.columns[0] is General.__table__.c.bar_newname
def test_column_join_checks_superclass_type(self):
"""Test that the logic which joins subclass props to those
of the superclass checks that the superclass property is a column.
"""
class General(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
general_id = Column(Integer, ForeignKey('test.id'))
type_ = relationship("General")
class Specific(General):
__tablename__ = 'sub'
id = Column(Integer, ForeignKey('test.id'), primary_key=True)
type_ = Column('foob', String(50))
assert isinstance(General.type_.property, sa.orm.RelationshipProperty)
assert Specific.type_.property.columns[0] is Specific.__table__.c.foob
def test_column_join_checks_subclass_type(self):
"""Test that the logic which joins subclass props to those
of the superclass checks that the subclass property is a column.
"""
def go():
class General(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
type_ = Column('foob', Integer)
class Specific(General):
__tablename__ = 'sub'
id = Column(Integer, ForeignKey('test.id'), primary_key=True)
specific_id = Column(Integer, ForeignKey('sub.id'))
type_ = relationship("Specific")
assert_raises_message(
sa.exc.ArgumentError, "column 'foob' conflicts with property", go
)
def test_table_args_overridden(self):
class MyMixin:
__table_args__ = {'mysql_engine': 'Foo'}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'})
def test_mapper_args_declared_attr(self):
class ComputedMapperArgs:
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Person':
return {'polymorphic_on': cls.discriminator}
else:
return {'polymorphic_identity': cls.__name__}
class Person(Base, ComputedMapperArgs):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
class Engineer(Person):
pass
configure_mappers()
assert class_mapper(Person).polymorphic_on \
is Person.__table__.c.type
eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer')
def test_mapper_args_declared_attr_two(self):
# same as test_mapper_args_declared_attr, but we repeat
# ComputedMapperArgs on both classes for no apparent reason.
class ComputedMapperArgs:
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Person':
return {'polymorphic_on': cls.discriminator}
else:
return {'polymorphic_identity': cls.__name__}
class Person(Base, ComputedMapperArgs):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
class Engineer(Person, ComputedMapperArgs):
pass
configure_mappers()
assert class_mapper(Person).polymorphic_on \
is Person.__table__.c.type
eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer')
def test_table_args_composite(self):
class MyMixin1:
__table_args__ = {'info': {'baz': 'bob'}}
class MyMixin2:
__table_args__ = {'info': {'foo': 'bar'}}
class MyModel(Base, MyMixin1, MyMixin2):
__tablename__ = 'test'
@declared_attr
def __table_args__(self):
info = {}
args = dict(info=info)
info.update(MyMixin1.__table_args__['info'])
info.update(MyMixin2.__table_args__['info'])
return args
id = Column(Integer, primary_key=True)
eq_(MyModel.__table__.info, {'foo': 'bar', 'baz': 'bob'})
def test_mapper_args_inherited(self):
class MyMixin:
__mapper_args__ = {'always_refresh': True}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
eq_(MyModel.__mapper__.always_refresh, True)
def test_mapper_args_inherited_descriptor(self):
class MyMixin:
@declared_attr
def __mapper_args__(cls):
# tenuous, but illustrates the problem!
if cls.__name__ == 'MyModel':
return dict(always_refresh=True)
else:
return dict(always_refresh=False)
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
eq_(MyModel.__mapper__.always_refresh, True)
def test_mapper_args_polymorphic_on_inherited(self):
class MyMixin:
type_ = Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
col = MyModel.__mapper__.polymorphic_on
eq_(col.name, 'type_')
assert col.table is not None
def test_mapper_args_overridden(self):
class MyMixin:
__mapper_args__ = dict(always_refresh=True)
class MyModel(Base, MyMixin):
__tablename__ = 'test'
__mapper_args__ = dict(always_refresh=False)
id = Column(Integer, primary_key=True)
eq_(MyModel.__mapper__.always_refresh, False)
def test_mapper_args_composite(self):
class MyMixin1:
type_ = Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class MyMixin2:
__mapper_args__ = {'always_refresh': True}
class MyModel(Base, MyMixin1, MyMixin2):
__tablename__ = 'test'
@declared_attr
def __mapper_args__(cls):
args = {}
args.update(MyMixin1.__mapper_args__)
args.update(MyMixin2.__mapper_args__)
if cls.__name__ != 'MyModel':
args.pop('polymorphic_on')
args['polymorphic_identity'] = cls.__name__
return args
id = Column(Integer, primary_key=True)
class MySubModel(MyModel):
pass
eq_(
MyModel.__mapper__.polymorphic_on.name,
'type_'
)
assert MyModel.__mapper__.polymorphic_on.table is not None
eq_(MyModel.__mapper__.always_refresh, True)
eq_(MySubModel.__mapper__.always_refresh, True)
eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel')
def test_mapper_args_property(self):
class MyModel(Base):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __table_args__(cls):
return {'mysql_engine': 'InnoDB'}
@declared_attr
def __mapper_args__(cls):
args = {}
args['polymorphic_identity'] = cls.__name__
return args
id = Column(Integer, primary_key=True)
class MySubModel(MyModel):
id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True)
class MySubModel2(MyModel):
__tablename__ = 'sometable'
id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True)
eq_(MyModel.__mapper__.polymorphic_identity, 'MyModel')
eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel')
eq_(MyModel.__table__.kwargs['mysql_engine'], 'InnoDB')
eq_(MySubModel.__table__.kwargs['mysql_engine'], 'InnoDB')
eq_(MySubModel2.__table__.kwargs['mysql_engine'], 'InnoDB')
eq_(MyModel.__table__.name, 'mymodel')
eq_(MySubModel.__table__.name, 'mysubmodel')
def test_mapper_args_custom_base(self):
"""test the @declared_attr approach from a custom base."""
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __table_args__(cls):
return {'mysql_engine': 'InnoDB'}
@declared_attr
def id(self):
return Column(Integer, primary_key=True)
Base = decl.declarative_base(cls=Base)
class MyClass(Base):
pass
class MyOtherClass(Base):
pass
eq_(MyClass.__table__.kwargs['mysql_engine'], 'InnoDB')
eq_(MyClass.__table__.name, 'myclass')
eq_(MyOtherClass.__table__.name, 'myotherclass')
assert MyClass.__table__.c.id.table is MyClass.__table__
assert MyOtherClass.__table__.c.id.table is MyOtherClass.__table__
def test_single_table_no_propagation(self):
class IdColumn:
id = Column(Integer, primary_key=True)
class Generic(Base, IdColumn):
__tablename__ = 'base'
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
value = Column(Integer())
class Specific(Generic):
__mapper_args__ = dict(polymorphic_identity='specific')
assert Specific.__table__ is Generic.__table__
eq_(list(Generic.__table__.c.keys()), ['id', 'type', 'value'])
assert class_mapper(Specific).polymorphic_on \
is Generic.__table__.c.type
eq_(class_mapper(Specific).polymorphic_identity, 'specific')
def test_joined_table_propagation(self):
class CommonMixin:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
timestamp = Column(Integer)
id = Column(Integer, primary_key=True)
class Generic(Base, CommonMixin):
discriminator = Column('python_type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
class Specific(Generic):
__mapper_args__ = dict(polymorphic_identity='specific')
id = Column(Integer, ForeignKey('generic.id'),
primary_key=True)
eq_(Generic.__table__.name, 'generic')
eq_(Specific.__table__.name, 'specific')
eq_(list(Generic.__table__.c.keys()), ['timestamp', 'id',
'python_type'])
eq_(list(Specific.__table__.c.keys()), ['id'])
eq_(Generic.__table__.kwargs, {'mysql_engine': 'InnoDB'})
eq_(Specific.__table__.kwargs, {'mysql_engine': 'InnoDB'})
def test_some_propagation(self):
class CommonMixin:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
timestamp = Column(Integer)
class BaseType(Base, CommonMixin):
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id = Column(Integer, primary_key=True)
value = Column(Integer())
class Single(BaseType):
__tablename__ = None
__mapper_args__ = dict(polymorphic_identity='type1')
class Joined(BaseType):
__mapper_args__ = dict(polymorphic_identity='type2')
id = Column(Integer, ForeignKey('basetype.id'),
primary_key=True)
eq_(BaseType.__table__.name, 'basetype')
eq_(list(BaseType.__table__.c.keys()), ['timestamp', 'type', 'id',
'value'])
eq_(BaseType.__table__.kwargs, {'mysql_engine': 'InnoDB'})
assert Single.__table__ is BaseType.__table__
eq_(Joined.__table__.name, 'joined')
eq_(list(Joined.__table__.c.keys()), ['id'])
eq_(Joined.__table__.kwargs, {'mysql_engine': 'InnoDB'})
def test_col_copy_vs_declared_attr_joined_propagation(self):
class Mixin(object):
a = Column(Integer)
@declared_attr
def b(cls):
return Column(Integer)
class A(Mixin, Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
assert 'a' in A.__table__.c
assert 'b' in A.__table__.c
assert 'a' not in B.__table__.c
assert 'b' not in B.__table__.c
def test_col_copy_vs_declared_attr_joined_propagation_newname(self):
class Mixin(object):
a = Column('a1', Integer)
@declared_attr
def b(cls):
return Column('b1', Integer)
class A(Mixin, Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
__tablename__ = 'b'
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
assert 'a1' in A.__table__.c
assert 'b1' in A.__table__.c
assert 'a1' not in B.__table__.c
assert 'b1' not in B.__table__.c
def test_col_copy_vs_declared_attr_single_propagation(self):
class Mixin(object):
a = Column(Integer)
@declared_attr
def b(cls):
return Column(Integer)
class A(Mixin, Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
pass
assert 'a' in A.__table__.c
assert 'b' in A.__table__.c
def test_non_propagating_mixin(self):
class NoJoinedTableNameMixin:
@declared_attr
def __tablename__(cls):
if decl.has_inherited_table(cls):
return None
return cls.__name__.lower()
class BaseType(Base, NoJoinedTableNameMixin):
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id = Column(Integer, primary_key=True)
value = Column(Integer())
class Specific(BaseType):
__mapper_args__ = dict(polymorphic_identity='specific')
eq_(BaseType.__table__.name, 'basetype')
eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value'])
assert Specific.__table__ is BaseType.__table__
assert class_mapper(Specific).polymorphic_on \
is BaseType.__table__.c.type
eq_(class_mapper(Specific).polymorphic_identity, 'specific')
def test_non_propagating_mixin_used_for_joined(self):
class TableNameMixin:
@declared_attr
def __tablename__(cls):
if decl.has_inherited_table(cls) and TableNameMixin \
not in cls.__bases__:
return None
return cls.__name__.lower()
class BaseType(Base, TableNameMixin):
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id = Column(Integer, primary_key=True)
value = Column(Integer())
class Specific(BaseType, TableNameMixin):
__mapper_args__ = dict(polymorphic_identity='specific')
id = Column(Integer, ForeignKey('basetype.id'),
primary_key=True)
eq_(BaseType.__table__.name, 'basetype')
eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value'])
eq_(Specific.__table__.name, 'specific')
eq_(list(Specific.__table__.c.keys()), ['id'])
def test_single_back_propagate(self):
class ColumnMixin:
timestamp = Column(Integer)
class BaseType(Base):
__tablename__ = 'foo'
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id = Column(Integer, primary_key=True)
class Specific(BaseType, ColumnMixin):
__mapper_args__ = dict(polymorphic_identity='specific')
eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'timestamp'])
def test_table_in_model_and_same_column_in_mixin(self):
class ColumnMixin:
data = Column(Integer)
class Model(Base, ColumnMixin):
__table__ = Table('foo', Base.metadata,
Column('data', Integer),
Column('id', Integer, primary_key=True))
model_col = Model.__table__.c.data
mixin_col = ColumnMixin.data
assert model_col is not mixin_col
eq_(model_col.name, 'data')
assert model_col.type.__class__ is mixin_col.type.__class__
def test_table_in_model_and_different_named_column_in_mixin(self):
class ColumnMixin:
tada = Column(Integer)
def go():
class Model(Base, ColumnMixin):
__table__ = Table('foo', Base.metadata,
Column('data', Integer),
Column('id', Integer, primary_key=True))
foo = relationship("Dest")
assert_raises_message(sa.exc.ArgumentError,
"Can't add additional column 'tada' when "
"specifying __table__", go)
def test_table_in_model_and_different_named_alt_key_column_in_mixin(self):
# here, the __table__ has a column 'tada'. We disallow
# the add of the 'foobar' column, even though it's
# keyed to 'tada'.
class ColumnMixin:
tada = Column('foobar', Integer)
def go():
class Model(Base, ColumnMixin):
__table__ = Table('foo', Base.metadata,
Column('data', Integer),
Column('tada', Integer),
Column('id', Integer, primary_key=True))
foo = relationship("Dest")
assert_raises_message(sa.exc.ArgumentError,
"Can't add additional column 'foobar' when "
"specifying __table__", go)
def test_table_in_model_overrides_different_typed_column_in_mixin(self):
class ColumnMixin:
data = Column(String)
class Model(Base, ColumnMixin):
__table__ = Table('foo', Base.metadata,
Column('data', Integer),
Column('id', Integer, primary_key=True))
model_col = Model.__table__.c.data
mixin_col = ColumnMixin.data
assert model_col is not mixin_col
eq_(model_col.name, 'data')
assert model_col.type.__class__ is Integer
def test_mixin_column_ordering(self):
class Foo(object):
col1 = Column(Integer)
col3 = Column(Integer)
class Bar(object):
col2 = Column(Integer)
col4 = Column(Integer)
class Model(Base, Foo, Bar):
id = Column(Integer, primary_key=True)
__tablename__ = 'model'
eq_(list(Model.__table__.c.keys()), ['col1', 'col3', 'col2', 'col4',
'id'])
def test_honor_class_mro_one(self):
class HasXMixin(object):
@declared_attr
def x(self):
return Column(Integer)
class Parent(HasXMixin, Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
class Child(Parent):
__tablename__ = 'child'
id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
assert "x" not in Child.__table__.c
def test_honor_class_mro_two(self):
class HasXMixin(object):
@declared_attr
def x(self):
return Column(Integer)
class Parent(HasXMixin, Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
def x(self):
return "hi"
class C(Parent):
__tablename__ = 'c'
id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
assert C().x() == 'hi'
def test_arbitrary_attrs_one(self):
class HasMixin(object):
@declared_attr
def some_attr(cls):
return cls.__name__ + "SOME ATTR"
class Mapped(HasMixin, Base):
__tablename__ = 't'
id = Column(Integer, primary_key=True)
eq_(Mapped.some_attr, "MappedSOME ATTR")
eq_(Mapped.__dict__['some_attr'], "MappedSOME ATTR")
def test_arbitrary_attrs_two(self):
from sqlalchemy.ext.associationproxy import association_proxy
class FilterA(Base):
__tablename__ = 'filter_a'
id = Column(Integer(), primary_key=True)
parent_id = Column(Integer(),
ForeignKey('type_a.id'))
filter = Column(String())
def __init__(self, filter_, **kw):
self.filter = filter_
class FilterB(Base):
__tablename__ = 'filter_b'
id = Column(Integer(), primary_key=True)
parent_id = Column(Integer(),
ForeignKey('type_b.id'))
filter = Column(String())
def __init__(self, filter_, **kw):
self.filter = filter_
class FilterMixin(object):
@declared_attr
def _filters(cls):
return relationship(cls.filter_class,
cascade='all,delete,delete-orphan')
@declared_attr
def filters(cls):
return association_proxy('_filters', 'filter')
class TypeA(Base, FilterMixin):
__tablename__ = 'type_a'
filter_class = FilterA
id = Column(Integer(), primary_key=True)
class TypeB(Base, FilterMixin):
__tablename__ = 'type_b'
filter_class = FilterB
id = Column(Integer(), primary_key=True)
TypeA(filters=['foo'])
TypeB(filters=['foo'])
class DeclarativeMixinPropertyTest(DeclarativeTestBase):
def test_column_property(self):
class MyMixin(object):
@declared_attr
def prop_hoho(cls):
return column_property(Column('prop', String(50)))
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
class MyOtherModel(Base, MyMixin):
__tablename__ = 'othertest'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
assert MyModel.__table__.c.prop is not None
assert MyOtherModel.__table__.c.prop is not None
assert MyModel.__table__.c.prop \
is not MyOtherModel.__table__.c.prop
assert MyModel.prop_hoho.property.columns \
== [MyModel.__table__.c.prop]
assert MyOtherModel.prop_hoho.property.columns \
== [MyOtherModel.__table__.c.prop]
assert MyModel.prop_hoho.property \
is not MyOtherModel.prop_hoho.property
Base.metadata.create_all()
sess = create_session()
m1, m2 = MyModel(prop_hoho='foo'), MyOtherModel(prop_hoho='bar')
sess.add_all([m1, m2])
sess.flush()
eq_(sess.query(MyModel).filter(MyModel.prop_hoho == 'foo'
).one(), m1)
eq_(sess.query(MyOtherModel).filter(MyOtherModel.prop_hoho
== 'bar').one(), m2)
def test_doc(self):
"""test documentation transfer.
the documentation situation with @declared_attr is problematic.
at least see if mapped subclasses get the doc.
"""
class MyMixin(object):
@declared_attr
def type_(cls):
"""this is a document."""
return Column(String(50))
@declared_attr
def t2(cls):
"""this is another document."""
return column_property(Column(String(50)))
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
configure_mappers()
eq_(MyModel.type_.__doc__, """this is a document.""")
eq_(MyModel.t2.__doc__, """this is another document.""")
def test_column_in_mapper_args(self):
class MyMixin(object):
@declared_attr
def type_(cls):
return Column(String(50))
__mapper_args__ = {'polymorphic_on': type_}
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
configure_mappers()
col = MyModel.__mapper__.polymorphic_on
eq_(col.name, 'type_')
assert col.table is not None
def test_column_in_mapper_args_used_multiple_times(self):
class MyMixin(object):
version_id = Column(Integer)
__mapper_args__ = {'version_id_col': version_id}
class ModelOne(Base, MyMixin):
__tablename__ = 'm1'
id = Column(Integer, primary_key=True)
class ModelTwo(Base, MyMixin):
__tablename__ = 'm2'
id = Column(Integer, primary_key=True)
is_(
ModelOne.__mapper__.version_id_col,
ModelOne.__table__.c.version_id
)
is_(
ModelTwo.__mapper__.version_id_col,
ModelTwo.__table__.c.version_id
)
def test_deferred(self):
class MyMixin(object):
@declared_attr
def data(cls):
return deferred(Column('data', String(50)))
class MyModel(Base, MyMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
Base.metadata.create_all()
sess = create_session()
sess.add_all([MyModel(data='d1'), MyModel(data='d2')])
sess.flush()
sess.expunge_all()
d1, d2 = sess.query(MyModel).order_by(MyModel.data)
assert 'data' not in d1.__dict__
assert d1.data == 'd1'
assert 'data' in d1.__dict__
def _test_relationship(self, usestring):
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
if usestring:
@declared_attr
def target(cls):
return relationship('Target',
primaryjoin='Target.id==%s.target_id'
% cls.__name__)
else:
@declared_attr
def target(cls):
return relationship('Target')
class Foo(Base, RefTargetMixin):
__tablename__ = 'foo'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
class Bar(Base, RefTargetMixin):
__tablename__ = 'bar'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
Base.metadata.create_all()
sess = create_session()
t1, t2 = Target(), Target()
f1, f2, b1 = Foo(target=t1), Foo(target=t2), Bar(target=t1)
sess.add_all([f1, f2, b1])
sess.flush()
eq_(sess.query(Foo).filter(Foo.target == t2).one(), f2)
eq_(sess.query(Bar).filter(Bar.target == t2).first(), None)
sess.expire_all()
eq_(f1.target, t1)
def test_relationship(self):
self._test_relationship(False)
def test_relationship_primaryjoin(self):
self._test_relationship(True)
class AbstractTest(DeclarativeTestBase):
def test_abstract_boolean(self):
class A(Base):
__abstract__ = True
__tablename__ = 'x'
id = Column(Integer, primary_key=True)
class B(Base):
__abstract__ = False
__tablename__ = 'y'
id = Column(Integer, primary_key=True)
class C(Base):
__abstract__ = False
__tablename__ = 'z'
id = Column(Integer, primary_key=True)
class D(Base):
__tablename__ = 'q'
id = Column(Integer, primary_key=True)
eq_(set(Base.metadata.tables), set(['y', 'z', 'q']))
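# Hedged illustration (not part of the test suite above): a minimal sketch of the
# declared_attr mixin pattern these tests exercise, assuming a fresh declarative
# Base and an in-memory SQLite engine. TimestampMixin and Widget are invented
# names for demonstration only.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import Session

ExampleBase = declarative_base()

class TimestampMixin(object):
    # a plain Column on a mixin is copied onto each mapped class that uses it
    created = Column(Integer)

    # declared_attr is invoked once per mapped class, so per-class values work
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

class Widget(TimestampMixin, ExampleBase):
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine("sqlite://")
ExampleBase.metadata.create_all(engine)
session = Session(bind=engine)
session.add(Widget(name="w1", created=123))
session.commit()
assert Widget.__table__.name == "widget"  # derived by the mixin's declared_attr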
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class PolicyDefinitionsOperations(object):
"""PolicyDefinitionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the operation. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def create_or_update(
self, policy_definition_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy definition.
:param policy_definition_name: The name of the policy definition to
create.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyDefinition')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy definition.
:param policy_definition_name: The name of the policy definition to
delete.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Gets the policy definition.
:param policy_definition_name: The name of the policy definition to
get.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_built_in(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Gets the built in policy definition.
:param policy_definition_name: The name of the built in policy
definition to get.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update_at_management_group(
self, policy_definition_name, parameters, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy definition at management group level.
:param policy_definition_name: The name of the policy definition to
create.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyDefinition')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_at_management_group(
self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy definition at management group level.
:param policy_definition_name: The name of the policy definition to
delete.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_at_management_group(
self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Gets the policy definition at management group level.
:param policy_definition_name: The name of the policy definition to
get.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the policy definitions for a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_built_in(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the built in policy definitions.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/providers/Microsoft.Authorization/policyDefinitions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_management_group(
self, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Gets all the policy definitions for a subscription at management group
level.
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions'
path_format_arguments = {
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
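# Hedged usage sketch (not generated code): how an operations class like the one
# above is normally reached through the generated management client. The client
# class name (PolicyClient), the credential type, and the placeholder IDs are
# assumptions for illustration; they are not defined in this module.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource.policy import PolicyClient

credentials = ServicePrincipalCredentials(
    client_id="<client-id>", secret="<secret>", tenant="<tenant-id>")
client = PolicyClient(credentials, "<subscription-id>")

# list() builds a lazy paged iterator; internal_paging above fetches each page on
# demand and every item deserializes to a PolicyDefinition model.
for item in client.policy_definitions.list():
    print(item.name)

# Single definitions are addressed by name; passing raw=True instead returns a
# ClientRawResponse wrapping both the HTTP response and the deserialized model.
definition = client.policy_definitions.get("<policy-definition-name>")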
|
|
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.client.gnt_cluster"""
import unittest
import optparse
import os
import shutil
import tempfile
from ganeti import errors
from ganeti.client import gnt_cluster
from ganeti import utils
from ganeti import compat
from ganeti import constants
from ganeti import ssh
from ganeti import cli
import mock
import testutils
class TestEpoUtilities(unittest.TestCase):
def setUp(self):
self.nodes2ip = dict(("node%s" % i, "192.0.2.%s" % i) for i in range(1, 10))
self.nodes = set(self.nodes2ip.keys())
self.ips2node = dict((v, k) for (k, v) in self.nodes2ip.items())
def _FakeAction(*args):
return True
def _FakePing(ip, port, live_port_needed=False):
self.assertTrue(live_port_needed)
self.assertEqual(port, 0)
return True
def _FakeSleep(secs):
self.assertTrue(secs >= 0 and secs <= 5)
return
def _NoopFeedback(self, text):
return
def testPingFnRemoveHostsUp(self):
seen = set()
def _FakeSeenPing(ip, *args, **kwargs):
node = self.ips2node[ip]
self.assertFalse(node in seen)
seen.add(node)
return True
helper = gnt_cluster._RunWhenNodesReachableHelper(self.nodes,
self._FakeAction,
self.nodes2ip, 0,
self._NoopFeedback,
_ping_fn=_FakeSeenPing,
_sleep_fn=self._FakeSleep)
nodes_len = len(self.nodes)
for (num, _) in enumerate(self.nodes):
helper.Wait(5)
if num < nodes_len - 1:
self.assertRaises(utils.RetryAgain, helper)
else:
helper()
self.assertEqual(seen, self.nodes)
self.assertFalse(helper.down)
self.assertEqual(helper.up, self.nodes)
def testActionReturnFalseSetsHelperFalse(self):
called = False
def _FalseAction(*args):
return called
helper = gnt_cluster._RunWhenNodesReachableHelper(self.nodes, _FalseAction,
self.nodes2ip, 0,
self._NoopFeedback,
_ping_fn=self._FakePing,
_sleep_fn=self._FakeSleep)
for _ in self.nodes:
try:
helper()
except utils.RetryAgain:
called = True
self.assertFalse(helper.success)
def testMaybeInstanceStartup(self):
instances_arg = []
def _FakeInstanceStart(opts, instances, start):
instances_arg.append(set(instances))
return None
inst_map = {
"inst1": set(["node1", "node2"]),
"inst2": set(["node1", "node3"]),
"inst3": set(["node2", "node1"]),
"inst4": set(["node2", "node1", "node3"]),
"inst5": set(["node4"]),
}
fn = _FakeInstanceStart
self.assertTrue(gnt_cluster._MaybeInstanceStartup(None, inst_map, set(),
_instance_start_fn=fn))
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map, set(["node1"]),
_instance_start_fn=fn)
self.assertTrue(result)
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3"]),
_instance_start_fn=fn)
self.assertTrue(result is None)
self.assertEqual(instances_arg.pop(0), set(["inst2"]))
self.assertFalse("inst2" in inst_map)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3"]),
_instance_start_fn=fn)
self.assertTrue(result)
self.assertFalse(instances_arg)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3", "node2"]),
_instance_start_fn=fn)
self.assertEqual(instances_arg.pop(0), set(["inst1", "inst3", "inst4"]))
self.assertTrue(result is None)
result = gnt_cluster._MaybeInstanceStartup(None, inst_map,
set(["node1", "node3", "node2",
"node4"]),
_instance_start_fn=fn)
self.assertTrue(result is None)
self.assertEqual(instances_arg.pop(0), set(["inst5"]))
self.assertFalse(inst_map)
class _ClientForEpo:
def __init__(self, groups, nodes):
self._groups = groups
self._nodes = nodes
def QueryGroups(self, names, fields, use_locking):
assert not use_locking
assert fields == ["node_list"]
return self._groups
def QueryNodes(self, names, fields, use_locking):
assert not use_locking
assert fields == ["name", "master", "pinst_list", "sinst_list", "powered",
"offline"]
return self._nodes
class TestEpo(unittest.TestCase):
_ON_EXITCODE = 253
_OFF_EXITCODE = 254
def _ConfirmForce(self, *args):
self.fail("Shouldn't need confirmation")
def _Confirm(self, exp_names, result, names, ltype, text):
self.assertEqual(names, exp_names)
self.assertFalse(result is NotImplemented)
return result
def _Off(self, exp_node_list, opts, node_list, inst_map):
self.assertEqual(node_list, exp_node_list)
self.assertFalse(inst_map)
return self._OFF_EXITCODE
def _Test(self, *args, **kwargs):
defaults = dict(qcl=NotImplemented, _on_fn=NotImplemented,
_off_fn=NotImplemented,
_stdout_fn=lambda *args: None,
_stderr_fn=lambda *args: None)
defaults.update(kwargs)
return gnt_cluster.Epo(*args, **defaults)
def testShowAllWithGroups(self):
opts = optparse.Values(dict(groups=True, show_all=True))
result = self._Test(opts, NotImplemented)
self.assertEqual(result, constants.EXIT_FAILURE)
def testShowAllWithArgs(self):
opts = optparse.Values(dict(groups=False, show_all=True))
result = self._Test(opts, ["a", "b", "c"])
self.assertEqual(result, constants.EXIT_FAILURE)
def testNoArgumentsNoParameters(self):
for (force, confirm_result) in [(True, NotImplemented), (False, False),
(False, True)]:
opts = optparse.Values(dict(groups=False, show_all=False, force=force,
on=False))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", False, [], [], True, False),
])
if force:
confirm_fn = self._ConfirmForce
else:
confirm_fn = compat.partial(self._Confirm, ["node1.example.com"],
confirm_result)
off_fn = compat.partial(self._Off, ["node1.example.com"])
result = self._Test(opts, [], qcl=client, _off_fn=off_fn,
_confirm_fn=confirm_fn)
if force or confirm_result:
self.assertEqual(result, self._OFF_EXITCODE)
else:
self.assertEqual(result, constants.EXIT_FAILURE)
def testPowerOn(self):
for master in [False, True]:
opts = optparse.Values(dict(groups=False, show_all=True,
force=True, on=True))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", False, [], [], True, False),
("node2.example.com", False, [], [], False, False),
("node3.example.com", False, [], [], True, True),
("node4.example.com", False, [], [], None, True),
("node5.example.com", master, [], [], False, False),
])
def _On(_, all_nodes, node_list, inst_map):
self.assertEqual(all_nodes,
["node%s.example.com" % i for i in range(1, 6)])
if master:
self.assertEqual(node_list, ["node2.example.com"])
else:
self.assertEqual(node_list, ["node2.example.com",
"node5.example.com"])
self.assertFalse(inst_map)
return self._ON_EXITCODE
result = self._Test(opts, [], qcl=client, _on_fn=_On,
_confirm_fn=self._ConfirmForce)
self.assertEqual(result, self._ON_EXITCODE)
def testMasterWithoutShowAll(self):
opts = optparse.Values(dict(groups=False, show_all=False,
force=True, on=False))
client = _ClientForEpo(NotImplemented, [
("node1.example.com", True, [], [], True, False),
])
result = self._Test(opts, [], qcl=client, _confirm_fn=self._ConfirmForce)
self.assertEqual(result, constants.EXIT_FAILURE)
class DrbdHelperTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.enabled_disk_templates = []
def enableDrbd(self):
self.enabled_disk_templates = [constants.DT_DRBD8]
def disableDrbd(self):
self.enabled_disk_templates = [constants.DT_DISKLESS]
class InitDrbdHelper(DrbdHelperTestCase):
def testNoDrbdNoHelper(self):
opts = mock.Mock()
opts.drbd_helper = None
self.disableDrbd()
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(None, helper)
def testNoDrbdHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(opts.drbd_helper, helper)
def testDrbdHelperNone(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(constants.DEFAULT_DRBD_HELPER, helper)
def testDrbdHelperEmpty(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = ''
self.assertRaises(errors.OpPrereqError, gnt_cluster._InitDrbdHelper, opts,
self.enabled_disk_templates, feedback_fn=mock.Mock())
def testDrbdHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._InitDrbdHelper(opts, self.enabled_disk_templates,
feedback_fn=mock.Mock())
self.assertEqual(opts.drbd_helper, helper)
class GetDrbdHelper(DrbdHelperTestCase):
def testNoDrbdNoHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(None, helper)
def testNoTemplateInfoNoHelper(self):
opts = mock.Mock()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(None, helper)
def testNoTemplateInfoHelper(self):
opts = mock.Mock()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(opts.drbd_helper, helper)
def testNoDrbdHelper(self):
opts = mock.Mock()
self.disableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, None)
self.assertEqual(opts.drbd_helper, helper)
def testDrbdNoHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = None
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(None, helper)
def testDrbdHelper(self):
opts = mock.Mock()
self.enableDrbd()
opts.drbd_helper = "/bin/true"
helper = gnt_cluster._GetDrbdHelper(opts, self.enabled_disk_templates)
self.assertEqual(opts.drbd_helper, helper)
class TestBuildGanetiPubKeys(testutils.GanetiTestCase):
_SOME_KEY_DICT = {"rsa": "key_rsa",
"dsa": "key_dsa"}
_MASTER_NODE_NAME = "master_node"
_MASTER_NODE_UUID = "master_uuid"
_NUM_NODES = 2 # excluding master node
_ONLINE_NODE_NAMES = ["node%s_name" % i for i in range(_NUM_NODES)]
_ONLINE_NODE_UUIDS = ["node%s_uuid" % i for i in range(_NUM_NODES)]
_CLUSTER_NAME = "cluster_name"
_PRIV_KEY = "master_private_key"
_PUB_KEY = "master_public_key"
_MODIFY_SSH_SETUP = True
_AUTH_KEYS = "a\nb\nc"
_SSH_KEY_TYPE = "dsa"
def _setUpFakeKeys(self):
os.makedirs(os.path.join(self.tmpdir, ".ssh"))
for key_type in ["rsa", "dsa"]:
self.priv_filename = os.path.join(self.tmpdir, ".ssh", "id_%s" % key_type)
utils.WriteFile(self.priv_filename, data=self._PRIV_KEY)
self.pub_filename = os.path.join(
self.tmpdir, ".ssh", "id_%s.pub" % key_type)
utils.WriteFile(self.pub_filename, data=self._PUB_KEY)
self.auth_filename = os.path.join(self.tmpdir, ".ssh", "authorized_keys")
utils.WriteFile(self.auth_filename, data=self._AUTH_KEYS)
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.pub_key_filename = os.path.join(self.tmpdir, "ganeti_test_pub_keys")
self._setUpFakeKeys()
self._ssh_read_remote_ssh_pub_keys_patcher = testutils \
.patch_object(ssh, "ReadRemoteSshPubKeys")
self._ssh_read_remote_ssh_pub_keys_mock = \
self._ssh_read_remote_ssh_pub_keys_patcher.start()
self._ssh_read_remote_ssh_pub_keys_mock.return_value = self._SOME_KEY_DICT
self.mock_cl = mock.Mock()
self.mock_cl.QueryConfigValues = mock.Mock()
self.mock_cl.QueryConfigValues.return_value = \
(self._CLUSTER_NAME, self._MASTER_NODE_NAME, self._MODIFY_SSH_SETUP,
self._SSH_KEY_TYPE)
self._get_online_nodes_mock = mock.Mock()
self._get_online_nodes_mock.return_value = \
self._ONLINE_NODE_NAMES
self._get_nodes_ssh_ports_mock = mock.Mock()
self._get_nodes_ssh_ports_mock.return_value = \
[22 for i in range(self._NUM_NODES + 1)]
self._get_node_uuids_mock = mock.Mock()
self._get_node_uuids_mock.return_value = \
self._ONLINE_NODE_UUIDS + [self._MASTER_NODE_UUID]
self._options = mock.Mock()
self._options.ssh_key_check = False
def _GetTempHomedir(self, _):
return self.tmpdir
def tearDown(self):
super(testutils.GanetiTestCase, self).tearDown()
shutil.rmtree(self.tmpdir)
self._ssh_read_remote_ssh_pub_keys_patcher.stop()
def testNewPubKeyFile(self):
gnt_cluster._BuildGanetiPubKeys(
self._options,
pub_key_file=self.pub_key_filename,
cl=self.mock_cl,
get_online_nodes_fn=self._get_online_nodes_mock,
get_nodes_ssh_ports_fn=self._get_nodes_ssh_ports_mock,
get_node_uuids_fn=self._get_node_uuids_mock,
homedir_fn=self._GetTempHomedir)
key_file_result = utils.ReadFile(self.pub_key_filename)
for node_uuid in self._ONLINE_NODE_UUIDS + [self._MASTER_NODE_UUID]:
self.assertTrue(node_uuid in key_file_result)
self.assertTrue(self._PUB_KEY in key_file_result)
def testOverridePubKeyFile(self):
fd = open(self.pub_key_filename, "w")
fd.write("Pink Bunny")
fd.close()
gnt_cluster._BuildGanetiPubKeys(
self._options,
pub_key_file=self.pub_key_filename,
cl=self.mock_cl,
get_online_nodes_fn=self._get_online_nodes_mock,
get_nodes_ssh_ports_fn=self._get_nodes_ssh_ports_mock,
get_node_uuids_fn=self._get_node_uuids_mock,
homedir_fn=self._GetTempHomedir)
self.assertFalse("Pink Bunny" in self.pub_key_filename)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
# GPS L1Cp code construction
#
# Copyright 2018 Peter Monta
import numpy as np
from sympy.ntheory import legendre_symbol
chip_rate = 1023000
code_length = 10230
l1cp_params = {
1: (5111,412), 2: (5109,161), 3: (5108,1), 4: (5106,303),
5: (5103,207), 6: (5101,4971), 7: (5100,4496), 8: (5098,5),
9: (5095,4557), 10: (5094,485), 11: (5093,253), 12: (5091,4676),
13: (5090,1), 14: (5081,66), 15: (5080,4485), 16: (5069,282),
17: (5068,193), 18: (5054,5211), 19: (5044,729), 20: (5027,4848),
21: (5026,982), 22: (5014,5955), 23: (5004,9805), 24: (4980,670),
25: (4915,464), 26: (4909,29), 27: (4893,429), 28: (4885,394),
29: (4832,616), 30: (4824,9457), 31: (4591,4429), 32: (3706,4771),
33: (5092,365), 34: (4986,9705), 35: (4965,9489), 36: (4920,4193),
37: (4917,9947), 38: (4858,824), 39: (4847,864), 40: (4790,347),
41: (4770,677), 42: (4318,6544), 43: (4126,6312), 44: (3961,9804),
45: (3790,278), 46: (4911,9461), 47: (4881,444), 48: (4827,4839),
49: (4795,4144), 50: (4789,9875), 51: (4725,197), 52: (4675,1156),
53: (4539,4674), 54: (4535,10035), 55: (4458,4504), 56: (4197,5),
57: (4096,9937), 58: (3484,430), 59: (3481,5), 60: (3393,355),
61: (3175,909), 62: (2360,1622), 63: (1852,6284),
64: (5065,9429), 65: (5063,77), 66: (5055,932), 67: (5012,5973),
68: (4981,377), 69: (4952,10000), 70: (4934,951), 71: (4932,6212),
72: (4786,686), 73: (4762,9352), 74: (4640,5999), 75: (4601,9912),
76: (4563,9620), 77: (4388,635), 78: (3820,4951), 79: (3687,5453),
80: (5052,4658), 81: (5051,4800), 82: (5047,59), 83: (5039,318),
84: (5015,571), 85: (5005,565), 86: (4984,9947), 87: (4975,4654),
88: (4974,148), 89: (4972,3929), 90: (4962,293), 91: (4913,178),
92: (4907,10142), 93: (4903,9683), 94: (4833,137), 95: (4778,565),
96: (4721,35), 97: (4661,5949), 98: (4660,2), 99: (4655,5982),
100: (4623,825), 101: (4590,9614), 102: (4548,9790), 103: (4461,5613),
104: (4442,764), 105: (4347,660), 106: (4259,4870), 107: (4256,4950),
108: (4166,4881), 109: (4155,1151), 110: (4109,9977), 111: (4100,5122),
112: (4023,10074),113: (3998,4832), 114: (3979,77), 115: (3903,4698),
116: (3568,1002), 117: (5088,5549), 118: (5050,9606), 119: (5020,9228),
120: (4990,604), 121: (4982,4678), 122: (4966,4854), 123: (4949,4122),
124: (4947,9471), 125: (4937,5026), 126: (4935,272), 127: (4906,1027),
128: (4901,317), 129: (4872,691), 130: (4865,509), 131: (4863,9708),
132: (4818,5033), 133: (4785,9938), 134: (4781,4314), 135: (4776,10140),
136: (4775,4790), 137: (4754,9823), 138: (4696,6093), 139: (4690,469),
140: (4658,1215), 141: (4607,799), 142: (4599,756), 143: (4596,9994),
144: (4530,4843), 145: (4524,5271), 146: (4451,9661), 147: (4441,6255),
148: (4396,5203), 149: (4340,203), 150: (4335,10070),151: (4296,30),
152: (4267,103), 153: (4168,5692), 154: (4149,32), 155: (4097,9826),
156: (4061,76), 157: (3989,59), 158: (3966,6831), 159: (3789,958),
160: (3775,1471), 161: (3622,10070), 162: (3523,553), 163: (3515,5487),
164: (3492,55), 165: (3345,208), 166: (3235,645), 167: (3169,5268),
168: (3157,1873), 169: (3082,427), 170: (3072,367), 171: (3032,1404),
172: (3030,5652), 173: (4582,5), 174: (4595,368), 175: (4068,451),
176: (4871,9595), 177: (4514,1030), 178: (4439,1324), 179: (4122,692),
180: (4948,9819), 181: (4774,4520), 182: (3923,9911), 183: (3411,278),
184: (4745,642), 185: (4195,6330), 186: (4897,5508), 187: (3047,1872),
188: (4185,5445), 189: (4354,10131), 190: (5077,422), 191: (4042,4918),
192: (2111,787), 193: (4311,9864), 194: (5024,9753), 195: (4352,9859),
196: (4678,328), 197: (5034,1), 198: (5085,4733), 199: (3646,164),
200: (4868,135), 201: (3668,174), 202: (4211,132), 203: (2883,538),
204: (2850,176), 205: (2815,198), 206: (2542,595), 207: (2492,574),
208: (2376,321), 209: (2036,596), 210: (1920,491),
}
N = 10223
L = np.array([legendre_symbol(i,N) for i in range(N)])
L[L==-1] = 0
L[0] = 0
def l1cp(prn):
w,p = l1cp_params[prn]
W = np.array([L[k]^L[(k+w)%N] for k in range(N)])
expansion = np.array([0,1,1,0,1,0,0])
c = np.concatenate((W[0:p-1],expansion,W[p-1:N]))
return c
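# Note on the construction above: W is the length-10223 Weil code obtained by
# XORing the Legendre sequence L with a copy of itself cyclically shifted by
# the Weil index w, and the 7-chip expansion sequence 0110100 is spliced in
# starting at (1-based) chip position p, giving the full 10230-chip L1Cp code.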
codes = {}
def l1cp_code(prn):
if prn not in codes:
codes[prn] = l1cp(prn)
return codes[prn]
def code(prn,chips,frac,incr,n):
c = l1cp_code(prn)
idx = (chips%code_length) + frac + incr*np.arange(n)
idx = np.floor(idx).astype('int')
idx = np.mod(idx,code_length)
x = c[idx]
return 1.0 - 2.0*x
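# Illustrative use of code() -- a sketch only, with an assumed sampling rate
# (fs below is an example value, not taken from any particular receiver):
#
#   fs = 4.092e6                          # samples per second (assumed)
#   incr = chip_rate / fs                 # code chips advanced per sample
#   n = int(round(fs * 0.010))            # one 10 ms code period of samples
#   samples = code(prn=1, chips=0, frac=0.0, incr=incr, n=n)
#
# `samples` is then a +/-1.0 sequence suitable for correlation.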
l1cp_secondary_params = {
1: (0o5111,0o3266), 2: (0o5421,0o2040), 3: (0o5501,0o1527), 4: (0o5403,0o3307),
5: (0o6417,0o3756), 6: (0o6141,0o3026), 7: (0o6351,0o0562), 8: (0o6501,0o0420),
9: (0o6205,0o3415), 10: (0o6235,0o0337), 11: (0o7751,0o0265), 12: (0o6623,0o1230),
13: (0o6733,0o2204), 14: (0o7627,0o1440), 15: (0o5667,0o2412), 16: (0o5051,0o3516),
17: (0o7665,0o2761), 18: (0o6325,0o3750), 19: (0o4365,0o2701), 20: (0o4745,0o1206),
21: (0o7633,0o1544), 22: (0o6747,0o1774), 23: (0o4475,0o0546), 24: (0o4225,0o2213),
25: (0o7063,0o3707), 26: (0o4423,0o2051), 27: (0o6651,0o3650), 28: (0o4161,0o1777),
29: (0o7237,0o3203), 30: (0o4473,0o1762), 31: (0o5477,0o2100), 32: (0o6163,0o0571),
33: (0o7223,0o3710), 34: (0o6323,0o3535), 35: (0o7125,0o3110), 36: (0o7035,0o1426),
37: (0o4341,0o0255), 38: (0o4353,0o0321), 39: (0o4107,0o3124), 40: (0o5735,0o0572),
41: (0o6741,0o1736), 42: (0o7071,0o3306), 43: (0o4563,0o1307), 44: (0o5755,0o3763),
45: (0o6127,0o1604), 46: (0o4671,0o1021), 47: (0o4511,0o2624), 48: (0o4533,0o0406),
49: (0o5357,0o0114), 50: (0o5607,0o0077), 51: (0o6673,0o3477), 52: (0o6153,0o1000),
53: (0o7565,0o3460), 54: (0o7107,0o2607), 55: (0o6211,0o2057), 56: (0o4321,0o3467),
57: (0o7201,0o0706), 58: (0o4451,0o2032), 59: (0o5411,0o1464), 60: (0o5141,0o0520),
61: (0o7041,0o1766), 62: (0o6637,0o3270), 63: (0o4577,0o0341),
64: (0o5111,0o1740,0o3035), 65: (0o5111,0o3664,0o1557), 66: (0o5111,0o1427,0o0237), 67: (0o5111,0o2627,0o2527),
68: (0o5111,0o0701,0o3307), 69: (0o5111,0o3460,0o1402), 70: (0o5111,0o1373,0o1225), 71: (0o5111,0o2540,0o0607),
72: (0o5111,0o2004,0o0351), 73: (0o5111,0o2274,0o3724), 74: (0o5111,0o1340,0o1675), 75: (0o5111,0o0602,0o2625),
76: (0o5111,0o2502,0o1030), 77: (0o5111,0o0327,0o1443), 78: (0o5111,0o2600,0o3277), 79: (0o5111,0o0464,0o1132),
80: (0o5111,0o3674,0o0572), 81: (0o5111,0o3040,0o1241), 82: (0o5111,0o1153,0o0535), 83: (0o5111,0o0747,0o1366),
84: (0o5111,0o1770,0o0041), 85: (0o5111,0o3772,0o0561), 86: (0o5111,0o1731,0o0122), 87: (0o5111,0o1672,0o1205),
88: (0o5111,0o1333,0o3753), 89: (0o5111,0o2705,0o2543), 90: (0o5111,0o2713,0o3031), 91: (0o5111,0o3562,0o2260),
92: (0o5111,0o3245,0o3773), 93: (0o5111,0o3770,0o3156), 94: (0o5111,0o3202,0o2215), 95: (0o5111,0o3521,0o0146),
96: (0o5111,0o3250,0o2413), 97: (0o5111,0o2117,0o2564), 98: (0o5111,0o0530,0o3310), 99: (0o5111,0o3021,0o2267),
100: (0o5421,0o2511,0o3120), 101: (0o5421,0o1562,0o0064), 102: (0o5421,0o1067,0o1042), 103: (0o5421,0o0424,0o0476),
104: (0o5421,0o3402,0o1020), 105: (0o5421,0o1326,0o0431), 106: (0o5421,0o2142,0o0216), 107: (0o5421,0o0733,0o2736),
108: (0o5421,0o0504,0o2527), 109: (0o5421,0o1611,0o2431), 110: (0o5421,0o2724,0o1013), 111: (0o5421,0o0753,0o0524),
112: (0o5421,0o3724,0o0726), 113: (0o5421,0o2652,0o1042), 114: (0o5421,0o1743,0o3362), 115: (0o5421,0o0013,0o1364),
116: (0o5421,0o3464,0o3354), 117: (0o5421,0o2300,0o0623), 118: (0o5421,0o1334,0o0145), 119: (0o5421,0o2175,0o0214),
120: (0o5421,0o2564,0o0223), 121: (0o5421,0o3075,0o0151), 122: (0o5421,0o3455,0o2405), 123: (0o5421,0o3627,0o2522),
124: (0o5421,0o0617,0o3235), 125: (0o5421,0o1324,0o0452), 126: (0o5421,0o3506,0o2617), 127: (0o5421,0o2231,0o1300),
128: (0o5421,0o1110,0o1430), 129: (0o5421,0o1271,0o0773), 130: (0o5421,0o3740,0o0772), 131: (0o5421,0o3652,0o3561),
132: (0o5421,0o1644,0o0607), 133: (0o5421,0o3635,0o0420), 134: (0o5421,0o3436,0o0527), 135: (0o5421,0o3076,0o3770),
136: (0o5421,0o0434,0o2536), 137: (0o5421,0o3340,0o2233), 138: (0o5421,0o0054,0o3366), 139: (0o5403,0o2446,0o3766),
140: (0o5403,0o0025,0o3554), 141: (0o5403,0o0150,0o2060), 142: (0o5403,0o2746,0o2070), 143: (0o5403,0o2723,0o0713),
144: (0o5403,0o2601,0o3366), 145: (0o5403,0o3440,0o3247), 146: (0o5403,0o1312,0o2776), 147: (0o5403,0o0544,0o1244),
148: (0o5403,0o2062,0o2102), 149: (0o5403,0o0176,0o1712), 150: (0o5403,0o3616,0o1245), 151: (0o5403,0o1740,0o3344),
152: (0o5403,0o3777,0o1277), 153: (0o5403,0o0432,0o0165), 154: (0o5403,0o2466,0o2131), 155: (0o5403,0o1667,0o3623),
156: (0o5403,0o3601,0o0141), 157: (0o5403,0o2706,0o0421), 158: (0o5403,0o2022,0o3032), 159: (0o5403,0o1363,0o2065),
160: (0o5403,0o2331,0o3024), 161: (0o5403,0o3556,0o2663), 162: (0o5403,0o2205,0o2274), 163: (0o5403,0o3734,0o2114),
164: (0o5403,0o2115,0o1664), 165: (0o5403,0o0010,0o0413), 166: (0o5403,0o2140,0o1512), 167: (0o5403,0o3136,0o0135),
168: (0o5403,0o0272,0o2737), 169: (0o5403,0o3264,0o1015), 170: (0o5403,0o2017,0o1075), 171: (0o5403,0o2505,0o1255),
172: (0o5403,0o3532,0o3473), 173: (0o5403,0o0647,0o2716), 174: (0o5403,0o1542,0o0101), 175: (0o5403,0o2154,0o1105),
176: (0o5403,0o3734,0o1407), 177: (0o5403,0o2621,0o3407), 178: (0o5403,0o2711,0o1046), 179: (0o5403,0o0217,0o3237),
180: (0o5403,0o3503,0o0154), 181: (0o5403,0o3457,0o3010), 182: (0o5403,0o3750,0o2245), 183: (0o5403,0o2525,0o2051),
184: (0o5403,0o0113,0o2144), 185: (0o5403,0o0265,0o1743), 186: (0o5403,0o1711,0o2511), 187: (0o5403,0o0552,0o3410),
188: (0o5403,0o0675,0o1414), 189: (0o5403,0o1706,0o1275), 190: (0o5403,0o3513,0o2257), 191: (0o5403,0o1135,0o2331),
192: (0o5403,0o0566,0o0276), 193: (0o5403,0o0500,0o3261), 194: (0o5403,0o0254,0o1760), 195: (0o5403,0o3445,0o0430),
196: (0o5403,0o2542,0o3477), 197: (0o5403,0o1257,0o1676), 198: (0o6501,0o0211,0o1636), 199: (0o6501,0o0534,0o2411),
200: (0o6501,0o1420,0o1473), 201: (0o6501,0o3401,0o2266), 202: (0o6501,0o0714,0o2104), 203: (0o6501,0o0613,0o2070),
204: (0o6501,0o2475,0o1766), 205: (0o6501,0o2572,0o0711), 206: (0o6501,0o3265,0o2533), 207: (0o6501,0o1250,0o0353),
208: (0o6501,0o1711,0o1744), 209: (0o6501,0o2704,0o0053), 210: (0o6501,0o0135,0o2222),
}
sec_code_length = 1800
def int2list(x,n):
y = []
for i in range(n):
y.append((x>>i)&1)
return y
def xorprod(a,b):
t = 0
for x,y in zip(a,b):
t = t ^ (x*y)
return t
def s_shift(x,p):
return [xorprod(x,p)] + x[0:-1]
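# The three helpers above implement an 11-stage Fibonacci-style LFSR:
# int2list unpacks an integer into its low n bits (LSB first), `p` is the
# feedback tap mask, `x` is the register state, and each s_shift feeds the
# XOR of the tapped stages into stage 0 while the rest shift along; the
# generators below read their output from x[10] before each shift.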
def sec_l1cp(prn):
p,init = l1cp_secondary_params[prn]
p = int2list(p//2,11)
x = int2list(init,11)
c = np.zeros(sec_code_length)
for i in range(sec_code_length):
c[i] = x[10]
x = s_shift(x,p)
return c
def sec_l1cp_extended(prn):
p1,init1,init2 = l1cp_secondary_params[prn]
p2 = 0o5001
p1 = int2list(p1//2,11)
x1 = int2list(init1,11)
p2 = int2list(p2//2,11)
x2 = int2list(init2,11)
c = np.zeros(sec_code_length)
for i in range(sec_code_length):
c[i] = x1[10] ^ x2[10]
x1 = s_shift(x1,p1)
x2 = s_shift(x2,p2)
return c
secondary_codes = {}
def secondary_code(prn):
if prn not in secondary_codes:
if prn<64:
secondary_codes[prn] = sec_l1cp(prn)
else:
secondary_codes[prn] = sec_l1cp_extended(prn)
return secondary_codes[prn]
boc11 = np.array([1.0,-1.0])
tmboc_pattern = np.array([1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0])
try:
from numba import jit
except ImportError:
def jit(**kwargs):
return lambda x: x
@jit(nopython=True)
def correlate(x,prn,chips,frac,incr,c,boc11):
n = len(x)
p = 0.0j
cp = (chips+frac)%code_length
bp = (2*(chips+frac))%2
bp6 = (12*(chips+frac))%2
u = int(cp%33)
for i in range(n):
if tmboc_pattern[u]:
boc = boc11[int(bp6)]
else:
boc = boc11[int(bp)]
p += x[i]*(1.0-2.0*c[int(cp)])*boc
cp = (cp+incr)%code_length
bp = (bp+2*incr)%2
bp6 = (bp6+12*incr)%2
u = int(cp%33)
return p
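# Note on correlate(): tmboc_pattern marks which of the 33 chip positions in
# each block carry the BOC(6,1) subcarrier (boc11 indexed by bp6); all other
# chips use the BOC(1,1) subcarrier (boc11 indexed by bp). bp and bp6 advance
# at 2x and 12x the code-phase increment, i.e. at the two subcarrier rates.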
# test
def chips2octal(x):
s = ''
for i in range(len(x)//3):
d = 4*x[3*i] + 2*x[3*i+1] + x[3*i+2]
s = s + '%o'%int(d)
return s
if __name__=='__main__':
for prn in range(1,211):
c = l1cp_code(prn)
s1 = chips2octal(c[0:24])
s2 = chips2octal(c[-24:])
print("%d %s %s"%(prn,s1,s2))
print("secondary:")
for prn in range(1,211):
c = secondary_code(prn)
print('%d %s'%(prn,chips2octal(np.concatenate((np.array([0]),c[-11:])))))
|
|
"""
Ax_Metrics - EROut plugin 'geckoboard_text'
Writes Geckoboard JSON output for various charts for use with
http://www.geckoboard.com.
Contents:
- EROut_geckoboard_text - pages of text, optionally flagged
See:
- https://developer.geckoboard.com/#geck-o-meter
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
from .base import EROut_geckoboard
import logging
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------
# Type numbers used by Geckoboard Text widget:
_GB_TEXT_TYPE_NORMAL = 0 # no flag
_GB_TEXT_TYPE_INFO = 2 # small gray "i" flag
_GB_TEXT_TYPE_ALERT = 1 # serious orange "!" flag
_GB_TEXT_TYPE_BY_COLOR = { # map color to GB text type
'GREEN': _GB_TEXT_TYPE_NORMAL,
'AMBER': _GB_TEXT_TYPE_INFO,
'RED' : _GB_TEXT_TYPE_ALERT,
}
_QFORMAT_RAG_KEY_BY_COLOR = { # map color to QFormat format key name
'GREEN': 'green',
'AMBER': 'amber',
'RED': 'red',
}
# ----------------------------------------------------------------------------
class EROut_geckoboard_text(EROut_geckoboard):
"""
EROut (Extensible Report Outputter) Plugin for Geckoboard Text.
Adds JSON-serializable output to extinfo['jout'] dict.
Typical usage is with collapsed query, default 'LAST' reduce function,
and ghosts disabled. This prevents needless queries from running.
Non-collapsed queries with other reduce functions may be used too.
Each data series of each query processed by this EROut will result in
an additional text page which Geckoboard cycles through automatically.
If the QMetric 'rag' parameter is specified, the text may be flagged
as either important (amber) or very important (red). Otherwise text
is displayed without any flag.
QMetric NEGATIVE 'impact' is handled so that higher values of negative
data (e.g. bugs, expenses, etc.) raise the flag level.
The QFormat format strings (red, amber, green) support these params:
- qmlabel - label from QMetric
- value - actual value
- amber - amber value cutoff
- red - red value cutoff
Example formats:
red: "DANGER: SENSOR {qmlabel} - {value} OVER LIMIT!"
amber: "Notice: Sensor {qmlabel} - {value} near limit ({red})"
green: "Sensor {qmlabel} OK"
QFormat support (under 'geckoboard_text' or '_default'):
reduce : (optional) Function from metricdef.FUNCS to reduce
series with. Default 'LAST'.
red : (optional) Format str for red mode.
Only required if 'rag' specified in QMetric.
amber : (optional) Format str for amber mode.
Only required if 'rag' specified in QMetric.
green : Format str for green mode.
More info:
- https://developer.geckoboard.com/#text
Example JSON:
{
"item": [
{
"text": "Unfortunately, as you probably already know, people",
"type": 0
},
{
"text": "As you might know, I am a full time Internet",
"type": 1
}
]
}
"""
#
# Abstract Method Implementations
#
# abstract
def plugin_output(self, mdseries, query=None):
"""
EROut plugins must implement this abstract method.
Invoked to output MultiDataSeries as specified.
Returns nothing. Output target should be configured separately.
"""
log.debug("Outputting %s for query %s", mdseries, query)
self._qfdomain = 'geckoboard_text'
# Iterate MDS, writing each series:
for dseries in mdseries.iter_series():
self._write_series(dseries)
#
# Internal Methods
#
def _write_series(self, dseries):
"""
Write the current DataSeries to output as an item.
(Geckoboard supports up to 10 items (pages of text) in the JSON,
so up to 10 DataSeries can be used, which may be spread across
multiple queries.)
"""
# Prep:
self._dseries = dseries
self._write_series_prep()
# Calculate details:
self._write_series_identify_color()
self._write_series_set_type()
self._write_series_format_text()
# Add overall item to jout:
self.jout['item'].append(self._jitem)
def _write_series_prep(self):
"""Prepare internal data for new DataSeries."""
# Reduce series to single value by reduce func.
# Usually func 'LAST' with collapsed series (Servant option),
# but other operations can be useful too, e.g. AVG, etc.
reduce_func = self._qformat_get('reduce', 'LAST')
self._value = self._dseries.reduce(reduce_func)
# Prep JSON-serializable template to fill in:
self._jitem = {
"text": "",
"type": _GB_TEXT_TYPE_NORMAL,
}
def _write_series_identify_color(self):
"""Set self._color to GREEN,AMBER,RED based on value."""
# Default to GREEN:
self._color = 'GREEN'
if not self.query:
return # no Query, so stay GREEN
# Find first QMetric in QData with metric_id matching series metric:
# (reverse engineering since QData is not passed through MQEngine)
try:
self._qmetric = next(qm for qm in
self.query.qdata.iter_qmetrics()
if qm.metric_id == self._dseries.mdef.id
)
except StopIteration:
return # no QMetric, so stay GREEN (this is not likely)
if not self._qmetric.rag:
return # no 'rag' set on QMetric, so stay GREEN
(rag_c1, rag_c2) = self._qmetric.rag
# If negative impact (e.g. expenses, bugs, ...):
if self._qmetric.impact == 'NEGATIVE':
if self._value >= rag_c1:
self._color = 'RED'
elif self._value >= rag_c2:
self._color = 'AMBER'
# Else normal positive impact (e.g. revenue, sales, ...):
else:
assert self._qmetric.impact == 'POSITIVE'
if self._value <= rag_c1:
self._color = 'RED'
elif self._value <= rag_c2:
self._color = 'AMBER'
def _write_series_set_type(self):
"""Set jitem type based on color."""
self._jitem['type'] = _GB_TEXT_TYPE_BY_COLOR[self._color]
def _write_series_format_text(self):
"""Format jitem text based on color, value, etc.."""
# Default:
self._jitem['text'] = "{0}".format(self._value)
if not self.query or not self._qmetric:
return # no query or QMetric
# Get color format str:
fmtkey = _QFORMAT_RAG_KEY_BY_COLOR[self._color]
fmt = self._qformat_get(fmtkey, None)
if not fmt:
return # no matching color key in QFormat
# Build format params:
params = {
'qmlabel' : self._qmetric.label,
'value' : self._value,
'amber' : "?",
'red' : "?",
}
if self._qmetric.rag:
params['red'] = self._qmetric.rag[0]
params['amber'] = self._qmetric.rag[1]
# Format string:
text = fmt.format(**params)
self._jitem['text'] = text
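# ----------------------------------------------------------------------------
# Illustrative result (hypothetical label and value, using the docstring's
# example 'red' format): a RED series labeled "temp1" reduced to 105 would
# append
#     {"text": "DANGER: SENSOR temp1 - 105 OVER LIMIT!", "type": 1}
# to extinfo['jout']['item'].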
|
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The build environment, builders, actions and helper methods.
Define build environments, builders, actions and helper methods in this
central place and reuse it in all SConscripts as much as possible.
"""
import errno
import os
import os.path
import sys
import time
from datetime import datetime
from getversion import open_gee_version
import SCons
from SCons.Environment import Environment
def AppendToFlags(target, env, key, to_add):
if not SCons.Util.is_List(to_add):
to_add = [to_add]
tmp = target.get(key, env.get(key, []))
if not SCons.Util.is_List(tmp):
tmp = [tmp]
target[key] = tmp + to_add
def PrependToFlags(target, env, key, to_add):
if not SCons.Util.is_List(to_add):
to_add = [to_add]
tmp = target.get(key, env.get(key, []))
if not SCons.Util.is_List(tmp):
tmp = [tmp]
target[key] = to_add + tmp
# Qt stuff - yanked from scons-users mailing list archive
def Emitter(env, target, source):
base = SCons.Util.splitext(str(source[0].name))[0]
uidir = os.path.join(str(target[0].get_dir()), '.ui')
hfile = os.path.join(uidir, base+'.h')
cppfile = os.path.join(uidir, base+'.cpp')
mocdir = os.path.join(str(target[0].get_dir()), '.moc')
mocfile = os.path.join(mocdir, 'moc_' + base + '.cpp')
env.uic_impl(cppfile, [hfile, source])
env.moc(mocfile, hfile)
return [hfile], [source]
uic = SCons.Builder.Builder(action='$UIC $SOURCE -o $TARGET',
emitter=Emitter)
uic_impl = SCons.Builder.Builder(action='$UIC -o $TARGET -impl $SOURCES')
moc = SCons.Builder.Builder(action='$MOC -o $TARGET $SOURCE')
# pylint: disable=W0104
def CleanupLibFlags(prefix, a_list, suffix, stripprefix, stripsuffix, env):
a_list = env['_oldstripixes'](prefix, a_list, suffix,
stripprefix, stripsuffix, env)
return a_list
def AddSourceScannerToTargets(target, source, env):
for t in target:
if t.source_scanner is None:
key = t.scanner_key()
scanner = env.get_scanner(key)
if scanner:
t.source_scanner = scanner
return (target, source)
idl_h_builder = SCons.Builder.Builder(
action='$KHIDL --hfile $TARGET $SOURCE',
suffix='.h',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
idl_impl_h_builder = SCons.Builder.Builder(
action='$KHIDL --impl_hfile $TARGET $SOURCE',
suffix='_impl.h',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
idl_cpp_builder = SCons.Builder.Builder(
action='$KHIDL --cppfile $TARGET $SOURCE',
suffix='.cpp',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
def AliasBuilder(env, target, source):
(env, target, source) = (env, target, source) # Silence gpylint
def NoOutput(target, source, env):
(env, target, source) = (env, target, source) # Silence gpylint
return None
my_alias_builder = SCons.Builder.Builder(
action=SCons.Action.Action(AliasBuilder, NoOutput),
target_factory=SCons.Node.Alias.default_ans.Alias,
source_factory=SCons.Node.FS.Entry,
multi=1,
is_explicit=None,
name='my_alias_builder')
def WriteToFileFunc(file_name, strn):
"""Writes strn to file_name.
Args:
file_name: The file to which to write
strn: The string to write
"""
base_path = os.path.dirname(os.path.abspath(file_name))
os.system('test -d %s || mkdir -p %s' % (base_path, base_path))
f = open(file_name, 'w')
f.write(strn)
f.close()
def WriteToFileStrfunc(file_name, strn):
return 'WriteToFile(%s, %s)' % (file_name, strn)
def StringExpandFileFunc(target, source, env):
"""Expand "{var}" strings in a file, using values from `env`."""
if SCons.Util.is_List(target):
target = target[0].get_abspath()
if SCons.Util.is_List(source):
source = source[0].get_abspath()
# Read the input template into a string:
with open(source, 'r') as f:
template = f.read()
# Create output file parent directories:
target_dir = os.path.dirname(os.path.abspath(target))
try:
os.makedirs(target_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Expand template into output file:
with open(target, 'w') as f:
f.write(template.format(**env.gvars()))
def EmitBuildDateFunc(target, build_date):
"""Emits build date information to target file."""
fp = open(target, 'w')
fp.writelines(['// DO NOT MODIFY - auto-generated file\n',
'extern const char *const BUILD_DATE = "' +
time.strftime('%Y-%m-%d', build_date) + '";\n',
'extern const char *const BUILD_YEAR = "' +
time.strftime('%Y', build_date) + '";\n',
'extern const char *const BUILD_MONTH = "' +
time.strftime('%m', build_date) + '";\n',
'extern const char *const BUILD_DAY = "' +
time.strftime('%d', build_date) + '";\n',
])
fp.close()
def EmitBuildDateStrfunc(target, build_date):
return 'EmitBuildDate(%s, %s)' % (target, build_date)
def EmitVersionHeaderFunc(target):
"""Emit version information to the target file."""
versionStr = open_gee_version.get_short()
longVersionStr = open_gee_version.get_long()
fp = open(target, 'w')
fp.writelines(['// DO NOT MODIFY - auto-generated file\n',
'extern const char *const GEE_VERSION = "' +
versionStr + '";\n',
'extern const char *const GEE_LONG_VERSION = "' +
longVersionStr + '";\n'
])
fp.close()
def EmitVersionHeaderStrfunc(target):
return 'EmitVersionHeader(%s)' % (target,)
def EmitVersionFunc(target):
"""Emit version information to the target file."""
versionStr = open_gee_version.get_short()
with open(target, 'w') as fp:
fp.write(versionStr)
with open(open_gee_version.backup_file, 'w') as fp:
fp.write(versionStr)
def EmitVersionStrfunc(target):
return 'EmitVersion(%s)' % (target,)
def EmitLongVersionFunc(target):
"""Emit version information to the target file."""
versionStr = open_gee_version.get_long()
with open(target, 'w') as fp:
fp.write(versionStr)
def EmitLongVersionStrfunc(target):
return 'EmitLongVersion(%s)' % (target,)
# our derived class
class khEnvironment(Environment):
"""The derived environment class used in all of Fusion SConscripts."""
WriteToFile = SCons.Action.ActionFactory(WriteToFileFunc,
WriteToFileStrfunc)
EmitBuildDate = SCons.Action.ActionFactory(EmitBuildDateFunc,
EmitBuildDateStrfunc)
EmitVersion = SCons.Action.ActionFactory(EmitVersionFunc,
EmitVersionStrfunc)
EmitLongVersion = SCons.Action.ActionFactory(EmitLongVersionFunc,
EmitLongVersionStrfunc)
EmitVersionHeader = SCons.Action.ActionFactory(EmitVersionHeaderFunc,
EmitVersionHeaderStrfunc)
rsync_cmd = 'rsync -rltpvu %s %s'
rsync_excl_cmd = 'rsync -rltpvu --exclude %s %s %s'
def __init__(self,
exportdirs,
installdirs,
platform=SCons.Platform.Platform(),
tools=None,
toolpath=None,
options=None,
**kw):
if toolpath is None:
toolpath = []
args = (self, platform, tools, toolpath, options)
Environment.__init__(*args, **kw)
self.exportdirs = exportdirs
self.installdirs = installdirs
self['BUILDERS']['uic'] = uic
self['BUILDERS']['uic_impl'] = uic_impl
self['BUILDERS']['moc'] = moc
self['BUILDERS']['IDLH'] = idl_h_builder
self['BUILDERS']['IDLIMPLH'] = idl_impl_h_builder
self['BUILDERS']['IDLCPP'] = idl_cpp_builder
self['_oldstripixes'] = self['_stripixes']
self['_stripixes'] = CleanupLibFlags
self.StringExpandFileFunc = StringExpandFileFunc
DefineProtocolBufferBuilder(self)
@staticmethod
def bash_escape(value):
"""Escapes a given value as a BASH string."""
return "'{0}'".format(value.replace("'", "'\\''"))
def DeepCopy(self):
other = self.Clone()
other.MultiCommand = SCons.Action.ActionFactory(other.MultiCommandFunc,
other.MultiCommandStrfunc)
return other
def MultiCommandFunc(self, cmd):
"""Runs multiple commands in a single shell.
Args:
cmd: The bash commands (may be multiple lines)
Returns:
The return status of executing the command.
"""
return self.Execute('set -x && %s' % cmd.replace('\n', ' && '))
def MultiCommandStrfunc(self, cmd):
if SCons.SConf.dryrun:
return '+ %s' % cmd.replace('\n', '\n+ ')
else:
return ''
# Defines a Phony target that doesn't depend on anything and is always
# executed.
def PhonyTargets(self, **kw):
ret_val = []
for target, actions in kw.items():
ret_val.append(self.AlwaysBuild(self.Alias(target, [], actions)))
return ret_val
# Install the file or directory as part of the install target.
# Do this only after the dependency is built.
def InstallFileOrDir(self, source, destination, dependency, alias_name):
base_path = os.path.dirname(os.path.abspath(destination))
actions = ['test -d %s || mkdir -p %s' % (base_path, base_path),
self.rsync_cmd % (source, destination)]
if dependency:
self.Depends(self.Alias(alias_name), dependency)
this_dict = {alias_name: actions}
return self.PhonyTargets(**this_dict)
# TODO: replace this with env.Clean(depends_on, list_of_files_to_remove).
# The following is a workaround, as the above doesn't work for symbolic
# links due to a SCons bug. The suggested patch to SCons is at
# http://osdir.com/ml/programming.tools.scons.devel/2008-07/msg00100.html
def ExecuteOnClean(self, cmd):
if self.GetOption('clean'):
self.Execute(self.MultiCommand(cmd))
def UpdateCppflagsForSkia(self):
"""Update c++ flags for Skia code compilation."""
if self['release']:
self['CPPFLAGS'] += ['-DSK_RELEASE', '-DGR_RELEASE',
'-DSkDebugf="(void)"']
elif self['optimize']:
self['CPPFLAGS'] += ['-DSK_RELEASE', '-DGR_RELEASE',
'-DSkDebugf="(void)"']
else:
self['CPPFLAGS'] += ['-DSK_DEBUG', '-DGR_DEBUG']
if sys.byteorder == 'little':
self['CPPFLAGS'] += ['-DSK_R32_SHIFT=16', '-DSK_G32_SHIFT=8',
'-DSK_B32_SHIFT=0', '-DSK_A32_SHIFT=24']
else:
self['CPPFLAGS'] += ['-DSK_R32_SHIFT=8', '-DSK_G32_SHIFT=16',
'-DSK_B32_SHIFT=24', '-DSK_A32_SHIFT=0']
self['CPPFLAGS'] += [
'-DSK_SCALAR_IS_FLOAT', '-DSkUserConfig_DEFINED',
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/config'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/core'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/effects'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/images'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/lazy')
]
def staticLib(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
target = os.path.join(self.exportdirs['lib'], base)
args = (target, source)
ret = self.StaticLibrary(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def sharedLib(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
target = os.path.join(self.exportdirs['lib'], base)
args = (target, source)
ret = self.SharedLibrary(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def executable(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], base)
args = (newtarget, source)
ret = self.Program(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def test(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], 'tests', base)
args = (newtarget, source)
test_env = self.Clone()
test_env['LINKFLAGS'] = test_env['test_linkflags']
if test_env['test_extra_cppflags']:
# FIXME: The SCons shell escape seems to be broken, and the 'ESCAPE'
# environment variable isn't respected for some reason, so we add a
# dirty patch:
test_env['CPPFLAGS'] += map(
lambda s: s.replace('"', '\\"'), test_env['test_extra_cppflags'])
ret = test_env.Program(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def testScript(self, target, dest='bin', subdir='tests'):
instdir = self.fs.Dir(subdir, self.exportdirs[dest])
if not SCons.Util.is_List(target):
target = [target]
self.Install(instdir, target)
def executableLink(self, dest, target, source, **unused_kw):
"""path to the target in the srcdir (not builddir)."""
target_src_node = self.arg2nodes(target)[0].srcnode()
targetbase = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], targetbase)
sourcebase = os.path.basename(source)
newsource = os.path.join(self.exportdirs['bin'], sourcebase)
ret = self.Command(newtarget, [newsource],
['ln -sf ${SOURCE.file} $TARGET'])
self.Command(self.fs.File(targetbase, self.installdirs[dest]),
[self.fs.File(sourcebase, self.installdirs[dest])],
['ln $SOURCE $TARGET',
'chmod a+x $TARGET'])
self.Default(self.alias(target_src_node, ret))
return ret
def installedExecutableSymlink(self, dest, target, source, **unused_kw):
"""path to the target in the srcdir (not builddir)."""
targetbase = os.path.basename(target)
sourcebase = os.path.basename(source)
return self.Command(
self.fs.File(targetbase, self.installdirs[dest]),
[self.fs.File(sourcebase, self.installdirs[dest])],
['ln -sf ${SOURCE.file} $TARGET'])
def install(self, dest, target, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
if not SCons.Util.is_List(target):
target = [target]
self.Install(instdir, target)
def installAs(self, dest, src, newname, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
if not SCons.Util.is_List(src):
src = [src]
if not SCons.Util.is_List(newname):
newname = [newname]
self.InstallAs([self.fs.File(i, instdir) for i in newname], src)
def installRecursive(self, dest_root, source_path):
for root_dir, _, files in os.walk(source_path):
for file_path in files:
self.Install(
os.path.join(
dest_root,
os.path.relpath(root_dir, os.path.dirname(source_path))),
os.path.join(root_dir, file_path))
def installDirExcluding(self, dest, target_dir, excluded_list, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
self.installDirExcludingInternal(instdir, target_dir, excluded_list)
def installDirExcludingInternal(self, instdir, target_dir, excluded_list):
"""Get contents of target_dir and install in instdir."""
contents = os.listdir(target_dir)
target_dir += '/'
if not os.path.exists(instdir.get_abspath()):
os.makedirs(instdir.get_abspath())
for file_name in contents:
if file_name in excluded_list:
continue
target_file = target_dir + file_name
if os.path.isdir(target_file):
subdir = self.fs.Dir(file_name, instdir)
self.installDirExcludingInternal(subdir, target_file,
excluded_list)
else:
self.Install(instdir, target_file)
def copyfile(self, destdir, target, subdir=''):
instdir = self.fs.Dir(subdir, destdir)
ret = self.Install(instdir, target)
self.Default(self.alias(self.arg2nodes('all')[0].srcnode(), ret))
return ret
def qtFiles(self, uifiles, hfiles, imgfiles, prjbase):
for ui in uifiles:
self.uic(ui)
# now strip extensions from the .ui & .h files
uifiles = [os.path.splitext(str(i))[0] for i in uifiles]
hfiles = [os.path.splitext(str(i))[0] for i in hfiles]
for h in hfiles:
self.moc('.moc/moc_'+h+'.cpp', h+'.h')
if imgfiles:
imgcollect = [self.Command('.ui/image_collection.cpp', imgfiles,
'$UIC -embed %s $SOURCES -o $TARGET' % (
prjbase))
]
else:
imgcollect = []
uicpps = ['.ui/' + u + '.cpp' for u in uifiles]
uimoccpps = ['.moc/moc_' + u + '.cpp' for u in uifiles]
hmoccpps = ['.moc/moc_' + h + '.cpp' for h in hfiles]
return uicpps + uimoccpps + hmoccpps + imgcollect
def idl(self, sources):
for idlfile in sources:
base = os.path.splitext(str(idlfile))[0]
self.IDLH('.idl/%s.h' % base, [idlfile, self['KHIDL']])
self.IDLIMPLH('.idl/%s_impl.h' % base, [idlfile, self['KHIDL']])
self.IDLCPP('.idl/%s.cpp' % base, [idlfile, self['KHIDL']])
def alias(self, target, source=None):
if source is None:
source = []
tlist = self.arg2nodes(target, self.ans.Alias)
if not SCons.Util.is_List(source):
source = [source]
source = filter(None, source)
# Re-call all the target builders to add the sources to each target.
result = []
for t in tlist:
bld = t.get_builder() or my_alias_builder
result.extend(bld(self, t, source))
return result
def ObjFromOtherDir(self, sources):
if not SCons.Util.is_List(sources):
sources = [sources]
root_dir = self.exportdirs['root']
shobj_suffix = self['SHOBJSUFFIX']
return [root_dir + p + shobj_suffix for p in sources if p]
def get_open_gee_version(self):
return open_gee_version
def ProtocolBufferGenerator(source, target, env, for_signature):
"""Protocol buffer generator builder.
Args:
source: List of source nodes
target: List of target nodes
env: Environment in which to build
for_signature: Just generate command for build signature; don't actually
run it.
Returns:
protocol buffer generator.
"""
(env, target, source) = (env, target, source) # Silence gpylint
for_signature = for_signature # Silence gpylint
# Must run the protocol buffer compiler from the source directory!
command = ('cd ${SOURCES.dir}; '
'${TOOLS_BIN.abspath}/${PROTOBUF_COMPILER} '
'--cpp_out $PROTOBUF_OUT_ROOT ${SOURCES.file}')
return [command]
def ProtocolBufferEmitter(target, source, env):
"""Protocol buffer emitter.
Args:
target: List of target nodes
source: List of source nodes
env: Environment in which to build
Returns:
New (target, source).
"""
env = env # Silence gpylint
# regardless of where the source comes from, we want to put the output files
# (.pb.cc and .pb.h) into the PROTOBUF_OUT_ROOT directory.
out_dir = env['PROTOBUF_OUT_ROOT'] + '/'
# get the basename (non directory) of our source
sourcebase = os.path.basename(str(source[0]))
# strip off source extension and replace it with the two we want
targetcc = out_dir + os.path.splitext(sourcebase)[0] + '.pb.cc'
targeth = out_dir + os.path.splitext(sourcebase)[0] + '.pb.h'
# build a new list of targets (ignoring anything that scons already has there)
target = [targetcc, targeth]
return target, source
def DefineProtocolBufferBuilder(env):
# Note: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool.
Args:
env: Environment to modify.
"""
# All protocol buffer generated files will be placed in the export directory
# under protobuf.
# To include them, the caller need only include "protobuf/xxx.pb.h"
out_dir = os.path.join(env.exportdirs['root'], 'protobuf')
out_dir = out_dir.strip('#')
out_dir = os.path.abspath(out_dir)
env.Replace(
# Root of output; files will be placed in subdirs of this mirroring the
# source tree.
PROTOBUF_OUT_ROOT=out_dir
)
# Set tool based on local platform
env['TOOLS_BIN'] = env.fs.Dir('../tools/bin/')
env['PROTOBUF_COMPILER'] = 'protoc'
# Add protocol buffer builder
bld = SCons.Script.Builder(generator=ProtocolBufferGenerator,
emitter=ProtocolBufferEmitter,
single_source=1,
suffix='.pb.cc')
env.Append(BUILDERS={'ProtocolBuffer': bld})
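# Illustrative SConscript usage (hypothetical .proto path): once the builder
# is registered above, a protocol buffer can be compiled with e.g.
#     env.ProtocolBuffer('mydata.proto')
# which emits mydata.pb.cc and mydata.pb.h under $PROTOBUF_OUT_ROOT.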
|
|
# Copyright 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
import nova.context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import virtual_interface as vif_obj
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_pci_device
HOST = "testhost"
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
class FakeModel(dict):
"""Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
class FakeNetworkManager(network_manager.NetworkManager):
"""This NetworkManager doesn't call the base class so we can bypass all
inherited service cruft and just perform unit tests.
"""
class FakeDB(object):
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'network_id': 1,
'uuid': 'fake-uuid',
'address': 'DC:AD:BE:FF:EF:01'},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000020',
'network_id': 21,
'uuid': 'fake-uuid2',
'address': 'DC:AD:BE:FF:EF:02'},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000030',
'network_id': 31,
'uuid': 'fake-uuid3',
'address': 'DC:AD:BE:FF:EF:03'}]
floating_ips = [dict(address='172.16.1.1',
fixed_ip_id=100),
dict(address='172.16.1.2',
fixed_ip_id=200),
dict(address='173.16.1.2',
fixed_ip_id=210)]
fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
id=100,
address='172.16.0.1',
virtual_interface_id=0),
dict(test_fixed_ip.fake_fixed_ip,
id=200,
address='172.16.0.2',
virtual_interface_id=1),
dict(test_fixed_ip.fake_fixed_ip,
id=210,
address='173.16.0.2',
virtual_interface_id=2)]
def fixed_ip_get_by_instance(self, context, instance_uuid):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
raise exception.NetworkNotFoundForCidr(cidr=cidr)
def network_create_safe(self, context, net):
fakenet = dict(net)
fakenet['id'] = 999
return fakenet
def network_get(self, context, network_id, project_only="allow_none"):
return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
def network_get_by_uuid(self, context, network_uuid):
raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
def network_get_all(self, context):
raise exception.NoNetworksFound()
def network_get_all_by_uuids(self, context, project_only="allow_none"):
raise exception.NoNetworksFound()
def network_disassociate(self, context, network_id):
return True
def virtual_interface_get_all(self, context):
return self.vifs
def fixed_ips_by_virtual_interface(self, context, vif_id):
return [ip for ip in self.fixed_ips
if ip['virtual_interface_id'] == vif_id]
def fixed_ip_disassociate(self, context, address):
return True
def __init__(self, stubs=None):
self.db = self.FakeDB()
if stubs:
stubs.Set(vif_obj, 'db', self.db)
self.deallocate_called = None
self.deallocate_fixed_ip_calls = []
self.network_rpcapi = network_rpcapi.NetworkAPI()
# TODO(matelakat) method signature should align with the faked one's
def deallocate_fixed_ip(self, context, address=None, host=None,
instance=None):
self.deallocate_fixed_ip_calls.append((context, address, host))
# TODO(matelakat) use the deallocate_fixed_ip_calls instead
self.deallocate_called = address
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
pass
def get_instance_nw_info(context, instance_id, rxtx_factor,
host, instance_uuid=None, **kwargs):
pass
def fake_network(network_id, ipv6=None):
if ipv6 is None:
ipv6 = CONF.use_ipv6
fake_network = {'id': network_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
'label': 'test%d' % network_id,
'injected': False,
'multi_host': False,
'cidr': '192.168.%d.0/24' % network_id,
'cidr_v6': None,
'netmask': '255.255.255.0',
'netmask_v6': None,
'bridge': 'fake_br%d' % network_id,
'bridge_interface': 'fake_eth%d' % network_id,
'gateway': '192.168.%d.1' % network_id,
'gateway_v6': None,
'broadcast': '192.168.%d.255' % network_id,
'dns1': '192.168.%d.3' % network_id,
'dns2': '192.168.%d.4' % network_id,
'dns3': '192.168.%d.3' % network_id,
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.%d.2' % network_id,
'vpn_public_port': None,
'vpn_private_address': None,
'dhcp_start': None,
'rxtx_base': network_id * 10,
'priority': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'mtu': None,
'dhcp_server': '192.168.%d.1' % network_id,
'enable_dhcp': True,
'share_address': False}
if ipv6:
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
if CONF.flat_injected:
fake_network['injected'] = True
return fake_network
def fake_vif(x):
return {'id': x,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:%02x' % x,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
'network_id': x,
'instance_uuid': 'fake-uuid'}
def floating_ip_ids():
for i in range(1, 100):
yield i
def fixed_ip_ids():
for i in range(1, 100):
yield i
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
def next_fixed_ip(network_id, num_floating_ips=0):
next_id = next(fixed_ip_id)
f_ips = [FakeModel(**next_floating_ip(next_id))
for i in range(num_floating_ips)]
return {'id': next_id,
'network_id': network_id,
'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
'instance_uuid': 1,
'allocated': False,
'reserved': False,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'leased': True,
'host': HOST,
'deleted': 0,
'network': fake_network(network_id),
'virtual_interface': fake_vif(network_id),
# and since network_id and vif_id happen to be equivalent
'virtual_interface_id': network_id,
'floating_ips': f_ips}
def next_floating_ip(fixed_ip_id):
next_id = next(floating_ip_id)
return {'id': next_id,
'address': '10.10.10.%03d' % (next_id + 99),
'fixed_ip_id': fixed_ip_id,
'project_id': None,
'auto_assigned': False}
def ipv4_like(ip, match_string):
ip = ip.split('.')
match_octets = match_string.split('.')
for i, octet in enumerate(match_octets):
if octet == '*':
continue
if octet != ip[i]:
return False
return True
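# For example, ipv4_like('192.168.1.3', '192.168.*.*') is True, while
# ipv4_like('10.0.0.1', '192.168.*.*') is False.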
def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
floating_ips_per_fixed_ip=0):
# stubs is the self.stubs from the test
# ips_per_vif is the number of ips each vif will have
# floating_ips_per_fixed_ip is the number of floating ips for each fixed ip
network = network_manager.FlatManager(host=HOST)
network.db = db
# reset the fixed and floating ip generators
global floating_ip_id, fixed_ip_id, fixed_ips
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
fixed_ips = []
def fixed_ips_fake(*args, **kwargs):
global fixed_ips
ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in range(1, num_networks + 1)
for j in range(ips_per_vif)]
fixed_ips = ips
return ips
def update_cache_fake(*args, **kwargs):
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': 'fake-uuid',
'network_info': '[]',
}
return fake_info_cache
stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
class FakeContext(nova.context.RequestContext):
def is_admin(self):
return True
nw_model = network.get_instance_nw_info(
FakeContext('fakeuser', 'fake_project'),
0, 3, None)
return nw_model
def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
num_networks=1,
ips_per_vif=1,
floating_ips_per_fixed_ip=0):
def get_instance_nw_info(self, context, instance, conductor_api=None):
return fake_get_instance_nw_info(stubs, num_networks=num_networks,
ips_per_vif=ips_per_vif,
floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
if func is None:
func = get_instance_nw_info
stubs.Set(network_api.API, 'get_instance_nw_info', func)
def stub_out_network_cleanup(stubs):
stubs.Set(network_api.API, 'deallocate_for_instance',
lambda *args, **kwargs: None)
_real_functions = {}
def set_stub_network_methods(stubs):
global _real_functions
cm = compute_manager.ComputeManager
if not _real_functions:
_real_functions = {
'_allocate_network': cm._allocate_network,
'_deallocate_network': cm._deallocate_network}
def fake_networkinfo(*args, **kwargs):
return network_model.NetworkInfo()
def fake_async_networkinfo(*args, **kwargs):
return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
def unset_stub_network_methods(stubs):
global _real_functions
if _real_functions:
cm = compute_manager.ComputeManager
for name in _real_functions:
stubs.Set(cm, name, _real_functions[name])
def stub_compute_with_ips(stubs):
orig_get = compute_api.API.get
orig_get_all = compute_api.API.get_all
orig_create = compute_api.API.create
def fake_get(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
def fake_get_all(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
def fake_create(*args, **kwargs):
return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
def fake_pci_device_get_by_addr(context, node_id, dev_addr):
return test_pci_device.fake_db_dev
stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
stubs.Set(compute_api.API, 'get', fake_get)
stubs.Set(compute_api.API, 'get_all', fake_get_all)
stubs.Set(compute_api.API, 'create', fake_create)
def _get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3')]}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return jsonutils.dumps(info)
def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
context = args[0]
fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
def _info_cache_for(instance):
info_cache = dict(test_instance_info_cache.fake_info_cache,
network_info=_get_fake_cache(),
instance_uuid=instance['uuid'])
if isinstance(instance, obj_base.NovaObject):
_info_cache = objects.InstanceInfoCache(context)
objects.InstanceInfoCache._from_db_object(context, _info_cache,
info_cache)
info_cache = _info_cache
instance['info_cache'] = info_cache
if isinstance(instances, (list, obj_base.ObjectListBase)):
for instance in instances:
_info_cache_for(instance)
fake_device.claim(instance)
fake_device.allocate(instance)
else:
_info_cache_for(instances)
fake_device.claim(instances)
fake_device.allocate(instances)
return instances
def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the above kludge so that the database doesn't get out
of sync with the actual instance.
"""
instances, reservation_id = orig_func(*args, **kwargs)
fake_cache = _get_fake_cache()
for instance in instances:
instance['info_cache']['network_info'] = fake_cache
db.instance_info_cache_update(args[1], instance['uuid'],
{'network_info': fake_cache})
return (instances, reservation_id)
|
|
import json
import cgi
import datetime
from google.appengine.api import users
from django import http
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context, loader
from django.core.context_processors import csrf
from django.views.decorators.csrf import ensure_csrf_cookie
from models import *
from google.appengine.ext import webapp
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.api import images
from google.appengine.ext.webapp import blobstore_handlers
from djangoappengine import storage
# TODOS
# - don't get user_id until we're sure user is defined
# - update profile instead of creating new entry
def home(request):
return http.HttpResponse('Hello World!')
def test(request):
upload_url = blobstore.create_upload_url('/upload-rfp-photo/12345')
# profile_query = UserPhoto.query(UserPhoto.user == users.get_current_user().user_id())
# profile = profile_query.fetch()
# print profile[0].to_dict()['blob_key']
profile_query = RfpPhoto.query(RfpPhoto.rfp_key == '12345')
if profile_query.count() > 0:
profile = profile_query.fetch()
blob_key = profile[0].to_dict()['blob_key']
profile_url = images.get_serving_url(blob_key)
else:
profile_url = ""
template = loader.get_template('test.html')
# Context is a normal Python dictionary whose keys can be accessed in the template test.html
context = Context({
'url': upload_url,
'profile_url': profile_url
})
context.update(csrf(request))
return HttpResponse(template.render(context))
#########################################################
# Helpers
#########################################################
class BlobstoreFileUploadHandler(storage.BlobstoreFileUploadHandler):
"""Handler that adds blob key info to the file object."""
def new_file(self, field_name, *args, **kwargs):
# We need to re-process the POST data to get the blobkey info.
meta = self.request.META
meta['wsgi.input'].seek(0)
fields = cgi.FieldStorage(meta['wsgi.input'], environ=meta)
if field_name in fields:
current_field = fields[field_name]
self.content_type_extra = current_field.type_options
super(BlobstoreFileUploadHandler, self).new_file(field_name,
*args, **kwargs)
class DatetimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
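# Usage sketch: json.dumps({'when': datetime.datetime(2014, 1, 2, 3, 4, 5)},
# cls=DatetimeEncoder) returns '{"when": "2014-01-02T03:04:05Z"}'.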
#########################################################
# RFPs
#########################################################
def get_my_rfp(request):
user = users.get_current_user()
if user:
rfp_query = Rfp.query(Rfp.user == user.user_id())
if rfp_query.count() > 0:
rfps = rfp_query.fetch()
rfps_dict = [rfp.to_dict() for rfp in rfps]
return HttpResponse(json.dumps({'rfps': rfps_dict}, cls=DatetimeEncoder), content_type="application/json")
return HttpResponse(json.dumps({'rfps': None}), content_type="application/json")
def get_rfp(request):
user = users.get_current_user()
if user:
rfp_query = Rfp.query(Rfp.user != user.user_id())
else:
rfp_query = Rfp.query()
if rfp_query.count() > 0:
rfps = rfp_query.fetch()
rfps_dict = [rfp.to_dict() for rfp in rfps]
return HttpResponse(json.dumps({'rfps': rfps_dict}, cls=DatetimeEncoder), content_type="application/json")
return HttpResponse(json.dumps({'rfps': None}), content_type="application/json")
def rfp(request, rfp_key):
user = users.get_current_user()
print rfp_key
if user:
rfp = Rfp.get_by_id(int(rfp_key))
mine = False
if rfp:
rfp_info = json.dumps(rfp.to_dict(), cls=DatetimeEncoder)
if rfp.to_dict()['user'] == user.user_id():
mine = True
else:
rfp_info = None
submission_img_url = ""
submission_id = ""
if not mine: # if this RFP was not created by the current user.
submission_query = Submission.query(Submission.user == user.user_id())
if submission_query.count() > 0:
submissions = submission_query.fetch()
for s in submissions:
s_dict = s.to_dict()
if s_dict['rfp_key'] == rfp_key:
submission_img_url = images.get_serving_url(s_dict['blob_key'])
submission_id = s_dict['id']
if request.GET.__contains__("key"):
submission_img_url = images.get_serving_url(request.GET.__getitem__("key"))
if request.GET.__contains__("sid"):
submission_id = request.GET.__getitem__("sid")
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True,
'rfp_info': rfp_info,
'mine' : mine,
'submission_id' : submission_id,
'submission_img_url' : submission_img_url,
'submission_img_upload_url': blobstore.create_upload_url('/upload-rfp-submission/%s' % rfp_key)
}, autoescape=False)
template = loader.get_template('rfp.html')
return HttpResponse(template.render(context))
return HttpResponseRedirect('/browse-projects.html')
def add_rfp(request):
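    """Create an Rfp entity from a JSON POST body.

    Expected fields (as read below): userName, title, subtitle, prize,
    endDate in 'YYYY-MM-DD' form, details, terms, intendedUse, duration,
    territory, exclusivity and hashtag.  The response carries a blobstore
    upload URL so the client can attach a photo to the new RFP afterwards.
    """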
user = users.get_current_user()
    if user is None:
return HttpResponse("User not logged in.")
if request.method != 'POST':
return HttpResponse("Add RFP only accepts POSTs")
data = json.loads(request.body)
year, month, day = data['endDate'].split('-')
end_date = datetime.date(int(year), int(month), int(day))
rfp = Rfp(user=user.user_id(),
userName=data['userName'],
title=data['title'],
subtitle=data['subtitle'],
prize=int(data['prize']),
endDate=end_date,
details=data['details'],
terms=data['terms'],
intendedUse=data['intendedUse'],
duration=data['duration'],
territory=data['territory'],
exclusivity=data['exclusivity'],
hashtag=data['hashtag'])
rfp.put()
return HttpResponse(
json.dumps({'url': blobstore.create_upload_url('/upload-rfp-photo/%s' % str(rfp.key.id()))}), content_type="application/json")
#########################################################
# Submissions
#########################################################
def delete_submission(request, id):
if users.get_current_user():
user_id = users.get_current_user().user_id()
else:
return HttpResponse(content='Login required', content_type=None, status=401)
if request.method == 'POST':
submission = Submission.get_by_id(int(id))
if submission and submission.to_dict()['user'] == user_id:
submission.key.delete()
return HttpResponse('Success')
return HttpResponse(content='Request failed', content_type=None, status=401)
def get_submissions(request, rfp_key):
user = users.get_current_user()
if user:
rfp = Rfp.get_by_id(int(rfp_key))
if rfp:
#rfp_info = json.dumps(rfp.to_dict(), cls=DatetimeEncoder)
if rfp.to_dict()['user'] != user.user_id():
return HttpResponse(json.dumps({'submissions': None}), content_type="application/json")
submission_query = Submission.query(Submission.rfp_key == rfp_key)
if submission_query.count() > 0:
submissions = submission_query.fetch()
submission_img_urls = []
for s in submissions:
submission_img_urls.append({
'url': images.get_serving_url(s.to_dict()['blob_key']),
'user': s.to_dict()['user'],
'id': s.to_dict()['id'],
'liked': False
})
return HttpResponse(json.dumps({'submissions': submission_img_urls}), content_type="application/json")
return HttpResponse(json.dumps({'submissions': None}), content_type="application/json")
#########################################################
# Purchase
#########################################################
def purchase(request):
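    """Record a Purchase of a submission against one of the caller's RFPs.

    Expects a JSON body with 'rfp' (the Rfp id), 'submission' and 'user'
    (the submitting user's id), matching the fields read below.
    """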
user = users.get_current_user()
    if user is None:
return HttpResponse("User not logged in.")
if request.method != 'POST':
return HttpResponse("Purchase only accepts POSTs")
data = json.loads(request.body)
    rfp = Rfp.get_by_id(data['rfp'])
    if not rfp:
        return HttpResponse(content='RFP not found', content_type=None, status=404)
    if rfp.to_dict()['user'] != user.user_id():
        return HttpResponse("Can only purchase photos for RFPs you created", content_type=None, status=401)
    purchase = Purchase(rfp_id=str(data['rfp']),
                        submission_id=str(data['submission']),
                        user_id=str(data['user']))
    purchase.put()
    return HttpResponse("Success!")
#########################################################
# User profiles
#########################################################
def save_profile(request):
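    """Create or replace the current user's Profile from a JSON POST body.

    Expected fields (as read below): name, email, phone, instagram, location,
    website and about.  Any existing profile for the user is deleted first.
    """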
user = users.get_current_user()
    if user is None:
        return HttpResponse("User not logged in.")
    if request.method != 'POST':
        return HttpResponse("Save profile only accepts POSTs")
    print(request.body)
data = json.loads(request.body)
profile = Profile.query(Profile.user == user.user_id())
if profile.count() > 0:
profile.fetch()[0].key.delete()
profile = Profile(brand="",
user=user.user_id(),
name=data['name'],
email=data['email'],
phone=data['phone'],
instagram=data['instagram'],
location=data['location'],
website=data['website'],
about=data['about'])
profile.put()
return HttpResponse("Success!")
def profile(request, user_id):
user = users.get_current_user()
    if user is None:
return HttpResponse("User not logged in.")
profile_query = Profile.query(Profile.user == user_id)
if profile_query.count() > 0:
profile = profile_query.fetch()
profile_info = json.dumps(profile[0].to_dict())
else:
profile_info = None
img_query = UserPhoto.query(UserPhoto.user == user_id)
if img_query.count() > 0:
img = img_query.fetch()
img_url = images.get_serving_url(img[0].to_dict()['blob_key'])
else:
img_url = None
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True,
'profile_url' : img_url,
'profile_info': profile_info
}, autoescape=False)
template = loader.get_template('profile.html')
return HttpResponse(template.render(context))
@ensure_csrf_cookie
def my_profile(request):
user = users.get_current_user()
if user:
        if "key" in request.GET:
            profile_url = images.get_serving_url(request.GET["key"])
else:
profile_query = UserPhoto.query(UserPhoto.user == user.user_id())
if profile_query.count() > 0:
profile = profile_query.fetch()
profile_url = images.get_serving_url(profile[0].to_dict()['blob_key'])
else:
profile_url = ""
profile_query = Profile.query(Profile.user == user.user_id())
if profile_query.count() > 0:
profile = profile_query.fetch()
profile_info = json.dumps(profile[0].to_dict())
else:
profile_info = None
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True,
'profile_url': profile_url,
'profile_info': profile_info,
'profile_upload_url' : blobstore.create_upload_url('/upload-profile-photo.html')
}, autoescape=False)
template = loader.get_template('my-profile.html')
else:
context = Context({
'user_name': "",
'login_url': users.create_login_url('/'),
'logout_url': "",
'logged_in': False
}, autoescape=False)
template = loader.get_template('browse-projects.html')
return HttpResponse(template.render(context))
#########################################################
# Photo handlers
#########################################################
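# These views are the targets of the blobstore.create_upload_url(...) calls made
# above: App Engine's blobstore stores the uploaded file first and then forwards
# the request here, where request.FILES['file'].blobstore_info exposes the
# stored blob's key.  (Descriptive note only; the URL routing lives in the URL
# configuration, which is not part of this file.)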
def profile_photo_upload_handler(request):
image = request.FILES['file']
image_key = image.blobstore_info.key()
user_id = users.get_current_user().user_id()
profile_photo_query = UserPhoto.query(UserPhoto.user == user_id)
if profile_photo_query.count() > 0:
profile_photo = profile_photo_query.fetch()
profile_photo[0].key.delete()
user_photo = UserPhoto(user=user_id,
blob_key=image_key)
user_photo.put()
return HttpResponseRedirect('/my-profile.html?key='+str(image_key))
def submission_photo_upload_handler(request, rfp_key):
user_id = users.get_current_user().user_id()
image = request.FILES['file']
image_key = image.blobstore_info.key()
    # Match on both user and RFP so a re-upload replaces this RFP's submission
    # photo instead of clobbering a submission made to a different RFP.
    submission_query = Submission.query(Submission.user == user_id,
                                        Submission.rfp_key == rfp_key)
    if submission_query.count() > 0:
        submission = submission_query.fetch()[0]
        submission.blob_key = image_key
        submission.put()
else:
submission = Submission(user=user_id,
rfp_key=rfp_key,
blob_key=image_key)
submission.put()
return HttpResponseRedirect('/rfp/%s?key=%s&sid=%d' % (rfp_key, str(image_key), submission.key.id()))
def rfp_photo_upload_handler(request, rfp_key):
image = request.FILES['file']
image_key = image.blobstore_info.key()
rfp_photo = RfpPhoto(rfp_key=rfp_key,
blob_key=image_key)
rfp_photo.put()
return HttpResponseRedirect('/my-projects.html?key=%s&rfp=%s' % (str(image_key), rfp_key))
#########################################################
# Main pages
#########################################################
@ensure_csrf_cookie
def home(request):
user = users.get_current_user()
if user:
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True
}, autoescape=False)
else:
context = Context({
'user_name': "",
'login_url': users.create_login_url('/'),
'logout_url': "",
'logged_in': False
}, autoescape=False)
template = loader.get_template('home.html')
return HttpResponse(template.render(context))
@ensure_csrf_cookie
def browse_projects(request):
user = users.get_current_user()
if user:
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True
}, autoescape=False)
else:
context = Context({
'user_name': "",
'login_url': users.create_login_url('/'),
'logout_url': "",
'logged_in': False
}, autoescape=False)
template = loader.get_template('browse-projects.html')
return HttpResponse(template.render(context))
@ensure_csrf_cookie
def my_projects(request):
# if profile_query.count() > 0:
# profile = profile_query.fetch()
# blob_key = profile[0].to_dict()['blob_key']
# profile_url = images.get_serving_url(blob_key)
# else:
# profile_url = ""
    if "key" in request.GET:
        rfp_img_key = request.GET["key"]
        rfp_img_url = images.get_serving_url(rfp_img_key)
    else:
        rfp_img_url = ""
    if "rfp_key" in request.GET:
        rfp_img_upload_key = request.GET["rfp_key"]
rfp_img_upload_url = blobstore.create_upload_url('/upload-rfp-photo/%s' % rfp_img_upload_key)
else:
rfp_img_upload_url = ""
user = users.get_current_user()
if user:
context = Context({
'user_name': user.nickname(),
'login_url': "",
'logout_url': users.create_logout_url('/'),
'logged_in': True,
'rfp_img_url': rfp_img_url,
'rfp_img_upload_url': rfp_img_upload_url
}, autoescape=False)
else:
return HttpResponseRedirect('/browse-projects.html')
template = loader.get_template('my-projects.html')
return HttpResponse(template.render(context))
# grammar.py, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <[email protected]>
# Enhancements copyright 2003-2004 by Matthias Urlichs <[email protected]>
#
# This version of the Yapps 2 grammar can be distributed under the
# terms of the MIT open source license, either found in the LICENSE
# file included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
"""Parser for Yapps grammars.
This file defines the grammar of Yapps grammars. Naturally, it is
implemented in Yapps. The grammar.py module needed by Yapps is built
by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
"""
import sys, re
from yapps import parsetree
######################################################################
def cleanup_choice(rule, lst):
    # Collapse trivial choices: no alternatives become an empty sequence and a
    # single alternative is returned unchanged.
    if len(lst) == 0: return parsetree.Sequence(rule, [])
    if len(lst) == 1: return lst[0]
    return parsetree.Choice(rule, *tuple(lst))
def cleanup_sequence(rule, lst):
if len(lst) == 1: return lst[0]
return parsetree.Sequence(rule, *tuple(lst))
def resolve_name(rule, tokens, id, args):
if id in [x[0] for x in tokens]:
# It's a token
if args:
print('Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args))
return parsetree.Terminal(rule, id)
else:
# It's a name, so assume it's a nonterminal
return parsetree.NonTerminal(rule, id, args)
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class ParserDescriptionScanner(runtime.Scanner):
patterns = [
('"rule"', re.compile('rule')),
('"ignore"', re.compile('ignore')),
('"token"', re.compile('token')),
('"option"', re.compile('option')),
('":"', re.compile(':')),
('"parser"', re.compile('parser')),
('[ \t\r\n]+', re.compile('[ \t\r\n]+')),
('#.*?\r?\n', re.compile('#.*?\r?\n')),
('EOF', re.compile('$')),
('ATTR', re.compile('<<.+?>>')),
('STMT', re.compile('{{.+?}}')),
('ID', re.compile('[a-zA-Z_][a-zA-Z_0-9]*')),
('STR', re.compile('[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"')),
('LP', re.compile('\\(')),
('RP', re.compile('\\)')),
('LB', re.compile('\\[')),
('RB', re.compile('\\]')),
('OR', re.compile('[|]')),
('STAR', re.compile('[*]')),
('PLUS', re.compile('[+]')),
('QUEST', re.compile('[?]')),
('COLON', re.compile(':')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'[ \t\r\n]+':None,'#.*?\r?\n':None,},str,*args,**kw)
class ParserDescription(runtime.Parser):
Context = runtime.Context
def Parser(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Parser', [])
self._scan('"parser"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Options = self.Options(_context)
Tokens = self.Tokens(_context)
Rules = self.Rules(Tokens, _context)
EOF = self._scan('EOF', context=_context)
return parsetree.Generator(ID,Options,Tokens,Rules)
def Options(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Options', [])
opt = {}
while self._peek('"option"', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == '"option"':
self._scan('"option"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
opt[Str] = 1
return opt
def Tokens(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Tokens', [])
tok = []
while self._peek('"token"', '"ignore"', 'EOF', '"rule"', context=_context) in ['"token"', '"ignore"']:
_token = self._peek('"token"', '"ignore"', context=_context)
if _token == '"token"':
self._scan('"token"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
tok.append( (ID,Str) )
else: # == '"ignore"'
self._scan('"ignore"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
ign = ('#ignore',Str)
if self._peek('STMT', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == 'STMT':
STMT = self._scan('STMT', context=_context)
ign = ign + (STMT[2:-2],)
tok.append( ign )
return tok
def Rules(self, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'Rules', [tokens])
rul = []
while self._peek('"rule"', 'EOF', context=_context) == '"rule"':
self._scan('"rule"', context=_context)
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
self._scan('":"', context=_context)
ClauseA = self.ClauseA(ID, tokens, _context)
rul.append( (ID, OptParam, ClauseA) )
return rul
def ClauseA(self, rule, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens])
ClauseB = self.ClauseB(rule,tokens, _context)
v = [ClauseB]
while self._peek('OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'OR':
OR = self._scan('OR', context=_context)
ClauseB = self.ClauseB(rule,tokens, _context)
v.append(ClauseB)
return cleanup_choice(rule,v)
def ClauseB(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseB', [rule,tokens])
v = []
while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) in ['STR', 'ID', 'LP', 'LB', 'STMT']:
ClauseC = self.ClauseC(rule,tokens, _context)
v.append(ClauseC)
return cleanup_sequence(rule, v)
def ClauseC(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseC', [rule,tokens])
ClauseD = self.ClauseD(rule,tokens, _context)
_token = self._peek('PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context)
if _token == 'PLUS':
PLUS = self._scan('PLUS', context=_context)
return parsetree.Plus(rule, ClauseD)
elif _token == 'STAR':
STAR = self._scan('STAR', context=_context)
return parsetree.Star(rule, ClauseD)
elif _token == 'QUEST':
QUEST = self._scan('QUEST', context=_context)
return parsetree.Option(rule, ClauseD)
else:
return ClauseD
def ClauseD(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseD', [rule,tokens])
_token = self._peek('STR', 'ID', 'LP', 'LB', 'STMT', context=_context)
if _token == 'STR':
STR = self._scan('STR', context=_context)
t = (STR, eval(STR,{},{}))
if t not in tokens: tokens.insert( 0, t )
return parsetree.Terminal(rule, STR)
elif _token == 'ID':
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
return resolve_name(rule,tokens, ID, OptParam)
elif _token == 'LP':
LP = self._scan('LP', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RP = self._scan('RP', context=_context)
return ClauseA
elif _token == 'LB':
LB = self._scan('LB', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RB = self._scan('RB', context=_context)
return parsetree.Option(rule, ClauseA)
else: # == 'STMT'
STMT = self._scan('STMT', context=_context)
return parsetree.Eval(rule, STMT[2:-2])
def OptParam(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'OptParam', [])
if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'ATTR':
ATTR = self._scan('ATTR', context=_context)
return ATTR[2:-2]
return ''
def Str(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Str', [])
STR = self._scan('STR', context=_context)
return eval(STR,{},{})
def parse(rule, text):
P = ParserDescription(ParserDescriptionScanner(text))
return runtime.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
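# Illustrative usage sketch (not part of the generated grammar): the parse()
# entry point above takes the name of a top-level rule and the text of a
# grammar file, e.g.
#
#     with open('yapps_grammar.g') as f:
#         generator = parse('Parser', f.read())
#
# On success this returns the value built by the 'Parser' rule
# (a parsetree.Generator).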