repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values)
---|---|---|---|---|---|
earshel/PokeyPyManager | POGOProtos/Settings/Master/IapSettings_pb2.py | 16 | 4839 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/IapSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/IapSettings.proto',
package='POGOProtos.Settings.Master',
syntax='proto3',
serialized_pb=_b('\n,POGOProtos/Settings/Master/IapSettings.proto\x12\x1aPOGOProtos.Settings.Master\"\x8c\x02\n\x0bIapSettings\x12\x19\n\x11\x64\x61ily_bonus_coins\x18\x01 \x01(\x05\x12(\n daily_defender_bonus_per_pokemon\x18\x02 \x03(\x05\x12*\n\"daily_defender_bonus_max_defenders\x18\x03 \x01(\x05\x12%\n\x1d\x64\x61ily_defender_bonus_currency\x18\x04 \x03(\t\x12\"\n\x1amin_time_between_claims_ms\x18\x05 \x01(\x03\x12\x1b\n\x13\x64\x61ily_bonus_enabled\x18\x06 \x01(\x08\x12$\n\x1c\x64\x61ily_defender_bonus_enabled\x18\x07 \x01(\x08\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IAPSETTINGS = _descriptor.Descriptor(
name='IapSettings',
full_name='POGOProtos.Settings.Master.IapSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='daily_bonus_coins', full_name='POGOProtos.Settings.Master.IapSettings.daily_bonus_coins', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_defender_bonus_per_pokemon', full_name='POGOProtos.Settings.Master.IapSettings.daily_defender_bonus_per_pokemon', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_defender_bonus_max_defenders', full_name='POGOProtos.Settings.Master.IapSettings.daily_defender_bonus_max_defenders', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_defender_bonus_currency', full_name='POGOProtos.Settings.Master.IapSettings.daily_defender_bonus_currency', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_time_between_claims_ms', full_name='POGOProtos.Settings.Master.IapSettings.min_time_between_claims_ms', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_bonus_enabled', full_name='POGOProtos.Settings.Master.IapSettings.daily_bonus_enabled', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_defender_bonus_enabled', full_name='POGOProtos.Settings.Master.IapSettings.daily_defender_bonus_enabled', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=345,
)
DESCRIPTOR.message_types_by_name['IapSettings'] = _IAPSETTINGS
IapSettings = _reflection.GeneratedProtocolMessageType('IapSettings', (_message.Message,), dict(
DESCRIPTOR = _IAPSETTINGS,
__module__ = 'POGOProtos.Settings.Master.IapSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.IapSettings)
))
_sym_db.RegisterMessage(IapSettings)
# @@protoc_insertion_point(module_scope)
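# Illustrative usage sketch (not part of the generated file): round-tripping the
# IapSettings message through the standard protobuf Python API. The field values
# below are invented for demonstration only.
if __name__ == '__main__':
    example = IapSettings(daily_bonus_coins=10, daily_bonus_enabled=True)
    example.daily_defender_bonus_per_pokemon.extend([10, 10, 10])
    data = example.SerializeToString()    # wire-format bytes
    restored = IapSettings()
    restored.ParseFromString(data)        # parse back into a fresh message
    assert restored.daily_bonus_coins == 10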
| mit |
jyogi/purvar-agent | tests/checks/integration/test_gearmand.py | 46 | 2041 | # 3rd party
from nose.plugins.attrib import attr
# Agent
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='gearman')
class GearmanTestCase(AgentCheckTest):
CHECK_NAME = "gearmand"
def test_metrics(self):
tags = ['first_tag', 'second_tag']
service_checks_tags = ['server:127.0.0.1', 'port:4730']
config = {
'instances': [{
'tags': tags
}]
}
tags += service_checks_tags
self.run_check(config)
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags, count=1)
self.coverage_report()
def test_service_checks(self):
config = {
'instances': [
{'host': '127.0.0.1', 'port': 4730},
{'host': '127.0.0.1', 'port': 4731}]
}
self.assertRaises(Exception, self.run_check, config)
service_checks_tags_ok = ['server:127.0.0.1', 'port:4730']
service_checks_tags_not_ok = ['server:127.0.0.1', 'port:4731']
tags = service_checks_tags_ok
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags_ok, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.CRITICAL,
tags=service_checks_tags_not_ok, count=1)
self.coverage_report()
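# Illustrative sketch (not part of the test): a gearmand.yaml that would drive an
# equivalent check run. The layout follows the usual agent check-config format and
# the key names mirror the instance dicts above; treat it as an assumption rather
# than a verified config for any specific agent version.
#
#   init_config:
#
#   instances:
#     - host: 127.0.0.1
#       port: 4730
#       tags:
#         - first_tag
#         - second_tag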
| bsd-3-clause |
ojengwa/talk | venv/lib/python2.7/site-packages/django_filters/views.py | 56 | 3632 | from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.views.generic import View
from django.views.generic.list import MultipleObjectMixin
from django.views.generic.list import MultipleObjectTemplateResponseMixin
from .filterset import filterset_factory
class FilterMixin(object):
"""
A mixin that provides a way to show and handle a FilterSet in a request.
"""
filterset_class = None
def get_filterset_class(self):
"""
Returns the filterset class to use in this view
"""
if self.filterset_class:
return self.filterset_class
elif self.model:
return filterset_factory(self.model)
else:
msg = "'%s' must define 'filterset_class' or 'model'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
def get_filterset(self, filterset_class):
"""
Returns an instance of the filterset to be used in this view.
"""
kwargs = self.get_filterset_kwargs(filterset_class)
return filterset_class(**kwargs)
def get_filterset_kwargs(self, filterset_class):
"""
Returns the keyword arguments for instantiating the filterset.
"""
kwargs = {'data': self.request.GET or None}
try:
kwargs.update({
'queryset': self.get_queryset(),
})
except ImproperlyConfigured:
# ignore the error here if the filterset has a model defined
# to acquire a queryset from
if filterset_class._meta.model is None:
msg = ("'%s' does not define a 'model' and the view '%s' does "
"not return a valid queryset from 'get_queryset'. You "
"must fix one of them.")
args = (filterset_class.__name__, self.__class__.__name__)
raise ImproperlyConfigured(msg % args)
return kwargs
class BaseFilterView(FilterMixin, MultipleObjectMixin, View):
def get(self, request, *args, **kwargs):
filterset_class = self.get_filterset_class()
self.filterset = self.get_filterset(filterset_class)
self.object_list = self.filterset.qs
context = self.get_context_data(filter=self.filterset,
object_list=self.object_list)
return self.render_to_response(context)
class FilterView(MultipleObjectTemplateResponseMixin, BaseFilterView):
"""
Render some list of objects with filter, set by `self.model` or
`self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
"""
template_name_suffix = '_filter'
def object_filter(request, model=None, queryset=None, template_name=None,
extra_context=None, context_processors=None,
filter_class=None):
class ECFilterView(FilterView):
"""Handle the extra_context from the functional object_filter view"""
def get_context_data(self, **kwargs):
context = super(ECFilterView, self).get_context_data(**kwargs)
extra_context = self.kwargs.get('extra_context') or {}
for k, v in extra_context.items():
if callable(v):
v = v()
context[k] = v
return context
kwargs = dict(model=model, queryset=queryset, template_name=template_name,
filterset_class=filter_class)
view = ECFilterView.as_view(**kwargs)
return view(request, extra_context=extra_context)
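# Illustrative usage sketch (not part of this module): wiring FilterView into a
# URLconf. `myapp` and `Product` are hypothetical placeholders; when only `model`
# is given, FilterMixin.get_filterset_class() builds a FilterSet for it via
# filterset_factory().
#
#   from django.conf.urls import url
#   from django_filters.views import FilterView
#   from myapp.models import Product
#
#   urlpatterns = [
#       url(r'^products/$', FilterView.as_view(model=Product), name='product-list'),
#   ]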
| mit |
philippjfr/bokeh | examples/models/file/data_tables.py | 9 | 3295 | from bokeh.document import Document
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import DataTable, TableColumn, StringFormatter, NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor
from bokeh.models.layouts import Column
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.sampledata.autompg2 import autompg2 as mpg
source = ColumnDataSource(mpg)
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=source, columns=columns, editable=True, width=1000)
plot = Plot(title=None, x_range= DataRange1d(), y_range=DataRange1d(), plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(source, cty_glyph)
hwy = plot.add_glyph(source, hwy_glyph)
# Add the tools
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions='width')
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
layout = Column(plot, data_table)
doc = Document()
doc.add_root(layout)
if __name__ == "__main__":
doc.validate()
filename = "data_tables.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Data Tables"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
hgl888/chromium-crosswalk | tools/telemetry/telemetry/core/platform/profiling_controller_backend.py | 16 | 1761 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.platform.profiler import profiler_finder
class ProfilingControllerBackend(object):
def __init__(self, platform_backend, browser_backend):
self._platform_backend = platform_backend
self._browser_backend = browser_backend
self._active_profilers = []
self._profilers_states = {}
def Start(self, profiler_name, base_output_file):
"""Starts profiling using |profiler_name|. Results are saved to
|base_output_file|.<process_name>."""
assert not self._active_profilers, 'Already profiling. Must stop first.'
profiler_class = profiler_finder.FindProfiler(profiler_name)
if not profiler_class.is_supported(self._browser_backend.browser_type):
raise Exception('The %s profiler is not '
'supported on this platform.' % profiler_name)
if not profiler_class in self._profilers_states:
self._profilers_states[profiler_class] = {}
self._active_profilers.append(
profiler_class(self._browser_backend, self._platform_backend,
base_output_file, self._profilers_states[profiler_class]))
def Stop(self):
"""Stops all active profilers and saves their results.
Returns:
A list of filenames produced by the profiler.
"""
output_files = []
for profiler in self._active_profilers:
output_files.extend(profiler.CollectProfile())
self._active_profilers = []
return output_files
def WillCloseBrowser(self):
for profiler_class in self._profilers_states:
profiler_class.WillCloseBrowser(
self._browser_backend, self._platform_backend)
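# Illustrative usage sketch (not part of this module): the calling sequence the
# controller expects. The backend objects and the 'perf' profiler name are
# placeholders; Start() only succeeds if profiler_finder can resolve the name and
# the profiler supports the current browser type.
#
#   controller = ProfilingControllerBackend(platform_backend, browser_backend)
#   controller.Start('perf', '/tmp/telemetry_profile')
#   # ... run the benchmark / exercise the browser ...
#   output_files = controller.Stop()   # filenames written by the profiler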
| bsd-3-clause |
philippegabriel/xen | tools/pygrub/src/LiloConf.py | 55 | 5243 | #
#LiloConf.py
#
import sys, re, os
import logging
import GrubConf
class LiloImage(object):
def __init__(self, lines, path):
self.reset(lines, path)
def __repr__(self):
return ("title: %s\n"
" root: %s\n"
" kernel: %s\n"
" args: %s\n"
" initrd: %s\n" %(self.title, self.root, self.kernel,
self.args, self.initrd))
def reset(self, lines, path):
self._initrd = self._kernel = self._readonly = None
self._args = ""
self.title = ""
self.lines = []
self.path = path
self.root = ""
map(self.set_from_line, lines)
def set_from_line(self, line, replace = None):
(com, arg) = GrubConf.grub_exact_split(line, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], re.sub('^"(.+)"$', r"\1", arg.strip()))
else:
logging.info("Ignored image directive %s" %(com,))
else:
logging.warning("Unknown image directive %s" %(com,))
# now put the line in the list of lines
if replace is None:
self.lines.append(line)
else:
self.lines.pop(replace)
self.lines.insert(replace, line)
def set_kernel(self, val):
self._kernel = (None, self.path + "/" + val)
def get_kernel(self):
return self._kernel
kernel = property(get_kernel, set_kernel)
def set_initrd(self, val):
self._initrd = (None, self.path + "/" + val)
def get_initrd(self):
return self._initrd
initrd = property(get_initrd, set_initrd)
def set_args(self, val):
self._args = val
def get_args(self):
args = self._args
if self.root:
args += " root=" + self.root
if self.readonly:
args += " ro"
return args
args = property(get_args, set_args)
def set_readonly(self, val):
self._readonly = 1
def get_readonly(self):
return self._readonly
readonly = property(get_readonly, set_readonly)
# set up command handlers
commands = { "label": "title",
"root": "root",
"rootnoverify": "root",
"image": "kernel",
"initrd": "initrd",
"append": "args",
"read-only": "readonly",
"chainloader": None,
"module": None}
class LiloConfigFile(object):
def __init__(self, fn = None):
self.filename = fn
self.images = []
self.timeout = -1
self._default = 0
if fn is not None:
self.parse()
def parse(self, buf = None):
if buf is None:
if self.filename is None:
raise ValueError, "No config file defined to parse!"
f = open(self.filename, 'r')
lines = f.readlines()
f.close()
else:
lines = buf.split("\n")
path = os.path.dirname(self.filename)
img = []
for l in lines:
l = l.strip()
# skip blank lines
if len(l) == 0:
continue
# skip comments
if l.startswith('#'):
continue
# new image
if l.startswith("image"):
if len(img) > 0:
self.add_image(LiloImage(img, path))
img = [l]
continue
if len(img) > 0:
img.append(l)
continue
(com, arg) = GrubConf.grub_exact_split(l, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], arg.strip())
else:
logging.info("Ignored directive %s" %(com,))
else:
logging.warning("Unknown directive %s" %(com,))
if len(img) > 0:
self.add_image(LiloImage(img, path))
def hasPassword(self):
return False
def hasPasswordAccess(self):
return True
def add_image(self, image):
self.images.append(image)
def new_image(self, title, lines):
# The LiloImage constructor doesn't take a title, but since path
# is used by the get_{kernel|initrd} functions we pass an
# empty string rather than None (see lines above)
return LiloImage(lines, "")
def _get_default(self):
for i in range(len(self.images)):
if self.images[i].title == self._default:
return i
return 0
def _set_default(self, val):
self._default = val
default = property(_get_default, _set_default)
commands = { "default": "default",
"timeout": "timeout",
"prompt": None,
"relocatable": None,
}
if __name__ == "__main__":
if len(sys.argv) < 2:
raise RuntimeError, "Need a lilo.conf to read"
g = LiloConfigFile(sys.argv[1])
for i in g.images:
print i #, i.title, i.root, i.kernel, i.args, i.initrd
print g.default
| gpl-2.0 |
RaoUmer/django | tests/regressiontests/utils/http.py | 41 | 6263 | from datetime import datetime
import sys
from django.http import HttpResponse, utils
from django.test import RequestFactory
from django.utils.datastructures import MultiValueDict
from django.utils import http
from django.utils import six
from django.utils import unittest
class TestUtilsHttp(unittest.TestCase):
def test_same_origin_true(self):
# Identical
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com/'))
# One with trailing slash - see #15617
self.assertTrue(http.same_origin('http://foo.com', 'http://foo.com/'))
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com'))
# With port
self.assertTrue(http.same_origin('https://foo.com:8000', 'https://foo.com:8000/'))
def test_same_origin_false(self):
# Different scheme
self.assertFalse(http.same_origin('http://foo.com', 'https://foo.com'))
# Different host
self.assertFalse(http.same_origin('http://foo.com', 'http://goo.com'))
# Different host again
self.assertFalse(http.same_origin('http://foo.com', 'http://foo.com.evil.com'))
# Different port
self.assertFalse(http.same_origin('http://foo.com:8000', 'http://foo.com:8001'))
def test_urlencode(self):
# 2-tuples (the norm)
result = http.urlencode((('a', 1), ('b', 2), ('c', 3)))
self.assertEqual(result, 'a=1&b=2&c=3')
# A dictionary
result = http.urlencode({ 'a': 1, 'b': 2, 'c': 3})
acceptable_results = [
# Need to allow all of these as dictionaries have to be treated as
# unordered
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1'
]
self.assertTrue(result in acceptable_results)
result = http.urlencode({'a': [1, 2]}, doseq=False)
self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D')
result = http.urlencode({'a': [1, 2]}, doseq=True)
self.assertEqual(result, 'a=1&a=2')
result = http.urlencode({'a': []}, doseq=True)
self.assertEqual(result, '')
# A MultiValueDict
result = http.urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer']
}), doseq=True)
acceptable_results = [
# MultiValueDicts are similarly unordered
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon'
]
self.assertTrue(result in acceptable_results)
def test_fix_IE_for_vary(self):
"""
Regression for #16632.
`fix_IE_for_vary` shouldn't crash when there's no Content-Type header.
"""
# functions to generate responses
def response_with_unsafe_content_type():
r = HttpResponse(content_type="text/unsafe")
r['Vary'] = 'Cookie'
return r
def no_content_response_with_unsafe_content_type():
# 'Content-Type' always defaulted, so delete it
r = response_with_unsafe_content_type()
del r['Content-Type']
return r
# request with & without IE user agent
rf = RequestFactory()
request = rf.get('/')
ie_request = rf.get('/', HTTP_USER_AGENT='MSIE')
# not IE, unsafe_content_type
response = response_with_unsafe_content_type()
utils.fix_IE_for_vary(request, response)
self.assertTrue('Vary' in response)
# IE, unsafe_content_type
response = response_with_unsafe_content_type()
utils.fix_IE_for_vary(ie_request, response)
self.assertFalse('Vary' in response)
# not IE, no_content
response = no_content_response_with_unsafe_content_type()
utils.fix_IE_for_vary(request, response)
self.assertTrue('Vary' in response)
# IE, no_content
response = no_content_response_with_unsafe_content_type()
utils.fix_IE_for_vary(ie_request, response)
self.assertFalse('Vary' in response)
def test_base36(self):
# reciprocity works
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, http.base36_to_int(http.int_to_base36(n)))
if not six.PY3:
self.assertEqual(sys.maxint, http.base36_to_int(http.int_to_base36(sys.maxint)))
# bad input
self.assertRaises(ValueError, http.int_to_base36, -1)
if not six.PY3:
self.assertRaises(ValueError, http.int_to_base36, sys.maxint + 1)
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
self.assertRaises(TypeError, http.int_to_base36, n)
for n in ['#', ' ']:
self.assertRaises(ValueError, http.base36_to_int, n)
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
self.assertRaises(TypeError, http.base36_to_int, n)
# more explicit output testing
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(http.int_to_base36(n), b36)
self.assertEqual(http.base36_to_int(b36), n)
class ETagProcessingTests(unittest.TestCase):
def testParsing(self):
etags = http.parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])
def testQuoting(self):
quoted_etag = http.quote_etag(r'e\t"ag')
self.assertEqual(quoted_etag, r'"e\\t\"ag"')
class HttpDateProcessingTests(unittest.TestCase):
def testParsingRfc1123(self):
parsed = http.parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
def testParsingRfc850(self):
parsed = http.parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
def testParsingAsctime(self):
parsed = http.parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
| bsd-3-clause |
stevekuznetsov/ansible | lib/ansible/modules/notification/mqtt.py | 16 | 6860 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: mqtt
short_description: Publish a message on an MQTT topic for the IoT
version_added: "1.2"
description:
- Publish a message on an MQTT topic.
options:
server:
description:
- MQTT broker address/name
required: false
default: localhost
port:
description:
- MQTT broker port number
required: false
default: 1883
username:
description:
- Username to authenticate against the broker.
required: false
password:
description:
- Password for C(username) to authenticate against the broker.
required: false
client_id:
description:
- MQTT client identifier
required: false
default: hostname + pid
topic:
description:
- MQTT topic name
required: true
default: null
payload:
description:
- Payload. The special string C("None") may be used to send a NULL
(i.e. empty) payload which is useful to simply notify with the I(topic)
or to clear previously retained messages.
required: true
default: null
qos:
description:
- QoS (Quality of Service)
required: false
default: 0
choices: [ "0", "1", "2" ]
retain:
description:
- Setting this flag causes the broker to retain (i.e. keep) the message so that
applications that subsequently subscribe to the topic can receive the last
retained message immediately.
required: false
default: False
ca_certs:
description:
- The path to the Certificate Authority certificate files that are to be
treated as trusted by this client. If this is the only option given
then the client will operate in a similar manner to a web browser. That
is to say it will require the broker to have a certificate signed by the
Certificate Authorities in ca_certs and will communicate using TLS v1,
but will not attempt any form of authentication. This provides basic
network encryption but may not be sufficient depending on how the broker
is configured.
required: False
default: None
version_added: 2.3
certfile:
description:
- The path pointing to the PEM encoded client certificate. If this is not
None it will be used as client information for TLS based
authentication. Support for this feature is broker dependent.
required: False
default: None
version_added: 2.3
keyfile:
description:
- The path pointing to the PEM encoded client private key. If this is not
None it will be used as client information for TLS based
authentication. Support for this feature is broker dependent.
required: False
default: None
version_added: 2.3
# informational: requirements for nodes
requirements: [ mosquitto ]
notes:
- This module requires a connection to an MQTT broker such as Mosquitto
U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
author: "Jan-Piet Mens (@jpmens)"
'''
EXAMPLES = '''
- mqtt:
topic: 'service/ansible/{{ ansible_hostname }}'
payload: 'Hello at {{ ansible_date_time.iso8601 }}'
qos: 0
retain: False
client_id: ans001
delegate_to: localhost
'''
# ===========================================
# MQTT module support methods.
#
HAS_PAHOMQTT = True
try:
import socket
import paho.mqtt.publish as mqtt
except ImportError:
HAS_PAHOMQTT = False
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
server = dict(default = 'localhost'),
port = dict(default = 1883, type='int'),
topic = dict(required = True),
payload = dict(required = True),
client_id = dict(default = None),
qos = dict(default="0", choices=["0", "1", "2"]),
retain = dict(default=False, type='bool'),
username = dict(default = None),
password = dict(default = None, no_log=True),
ca_certs = dict(default = None, type='path'),
certfile = dict(default = None, type='path'),
keyfile = dict(default = None, type='path'),
),
supports_check_mode=True
)
if not HAS_PAHOMQTT:
module.fail_json(msg="Paho MQTT is not installed")
server = module.params.get("server", 'localhost')
port = module.params.get("port", 1883)
topic = module.params.get("topic")
payload = module.params.get("payload")
client_id = module.params.get("client_id", '')
qos = int(module.params.get("qos", 0))
retain = module.params.get("retain")
username = module.params.get("username", None)
password = module.params.get("password", None)
ca_certs = module.params.get("ca_certs", None)
certfile = module.params.get("certfile", None)
keyfile = module.params.get("keyfile", None)
if client_id is None:
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
if payload and payload == 'None':
payload = None
auth=None
if username is not None:
auth = { 'username' : username, 'password' : password }
tls=None
if ca_certs is not None:
tls = {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile}
try:
rc = mqtt.single(topic, payload,
qos=qos,
retain=retain,
client_id=client_id,
hostname=server,
port=port,
auth=auth,
tls=tls)
except Exception:
e = get_exception()
module.fail_json(msg="unable to publish to MQTT broker %s" % (e))
module.exit_json(changed=False, topic=topic)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
Newman101/scipy | scipy/special/tests/test_basic.py | 17 | 132165 | # this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, dec, TestCase, run_module_suite, assert_allclose,
assert_raises, assert_array_almost_equal_nulp)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk, zeta
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
from scipy._lib._version import NumpyVersion
import math
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes._gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
# Earlier numpy version don't guarantee that npy_cexp conforms to C99.
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@dec.knownfailureif(True, 'The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes._gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
# earlier numpy version don't guarantee that npy_clog conforms to C99
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
assert_allclose(zeta(2,2), pi**2/6 - 1, rtol=1e-12)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_zeta_1arg(self):
assert_allclose(zeta(2), pi**2/6, rtol=1e-12)
assert_allclose(zeta(4), pi**4/90, rtol=1e-12)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
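# same |z|**(+-1/4) envelope normalization as in test_ai_zeros_big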
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
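# B(a, b) = Gamma(a)*Gamma(b)/Gamma(a + b)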
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
special.comb(20, list(range(21))), atol=1e-15)
ii = np.iinfo(int).max + 1
assert_equal(special.comb(ii, ii-1, exact=True), ii)
expected = 100891344545564193334812497256
assert_equal(special.comb(100, 50, exact=True), expected)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
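# for m = 0 the Jacobi elliptic functions reduce to sn = sin(u), cn = cos(u),
# dn = 1, and the amplitude ph equals u itself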
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
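# heavy-tailed Pareto samples with random signs cover many orders of magnitude
# on both the real and imaginary axes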
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
# Use assert_array_equal instead of assert_equal, so the comparison
# of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
b = 1-a # a is the current state; 1-a inverts it
c = special.errprint(b) # returns last state 'a'
assert_equal(a,c)
d = special.errprint(a) # returns to original state
assert_equal(d,b) # makes sure state was returned
# assert_equal(d,1-a)
def test_erf_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -1, 1]
assert_allclose(special.erf(vals), expected, rtol=1e-15)
def test_erfc_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, 2, 0]
assert_allclose(special.erfc(vals), expected, rtol=1e-15)
def test_erfcx_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, np.inf, 0]
assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
def test_erfi_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -np.inf, np.inf]
assert_allclose(special.erfi(vals), expected, rtol=1e-15)
def test_dawsn_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -0.0, 0.0]
assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
def test_wofz_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just check that this doesn't segfault
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
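# mathworld lists |E_{2k}| for k = 0..12; the even-index Euler numbers alternate
# in sign, E_{2k} = (-1)**k * |E_{2k}| (the odd-index Euler numbers are zero)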
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
# Some known values, float math
assert_array_almost_equal(special.factorial(0), 1)
assert_array_almost_equal(special.factorial(1), 1)
assert_array_almost_equal(special.factorial(2), 2)
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
[[120, 6], [24, 6]])
# Some known values, integer math
assert_equal(special.factorial(0, exact=True), 1)
assert_equal(special.factorial(1, exact=True), 1)
assert_equal(special.factorial(2, exact=True), 2)
assert_equal(special.factorial(5, exact=True), 120)
assert_equal(special.factorial(15, exact=True), 1307674368000)
# ndarray shape is maintained
assert_equal(special.factorial([7, 4, 15, 10], exact=True),
[5040, 24, 1307674368000, 3628800])
assert_equal(special.factorial([[5, 3], [4, 3]], True),
[[120, 6], [24, 6]])
# object arrays
assert_equal(special.factorial(np.arange(-3, 22), True),
special.factorial(np.arange(-3, 22), False))
# int64 array
assert_equal(special.factorial(np.arange(-3, 15), True),
special.factorial(np.arange(-3, 15), False))
# int32 array
assert_equal(special.factorial(np.arange(-3, 5), True),
special.factorial(np.arange(-3, 5), False))
# Consistent output for n < 0
for exact in (True, False):
assert_array_equal(0, special.factorial(-3, exact))
assert_array_equal([1, 2, 0, 0],
special.factorial([1, 2, -5, -4], exact))
for n in range(0, 22):
# Compare all with math.factorial
correct = math.factorial(n)
assert_array_equal(correct, special.factorial(n, True))
assert_array_equal(correct, special.factorial([n], True)[0])
assert_allclose(float(correct), special.factorial(n, False))
assert_allclose(float(correct), special.factorial([n], False)[0])
# Compare exact=True vs False, scalar vs array
assert_array_equal(special.factorial(n, True),
special.factorial(n, False))
assert_array_equal(special.factorial([n], True),
special.factorial([n], False))
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
def test_fresnel_inf1(self):
frs = special.fresnel(np.inf)
assert_equal(frs, (0.5, 0.5))
def test_fresnel_inf2(self):
frs = special.fresnel(-np.inf)
assert_equal(frs, (-0.5, -0.5))
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincinf(self):
gama = special.gammainc(0.5, np.inf)
assert_equal(gama,1.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinf(self):
gama = special.gammaincc(0.5,np.inf)
assert_equal(gama,0.0)
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the switch point, at the floats immediately
# next to it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankel2e(self):
hank2e = special.hankel2e(1,.1)
# hankel2e(v, z) = hankel2(v, z) * exp(1j*z), mirroring test_hankel1e above
hankrl2e = special.hankel2(1,.1)*exp(.1j)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp0f1_gh5764(self):
# Just checks the point that failed; there's a more systematic
# test in test_mpmath
res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
# The expected value was generated using mpmath
assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f1_gh2282(self):
hyp = special.hyp1f1(0.5, 1.5, -1000)
assert_almost_equal(hyp, 0.028024956081989643, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
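# compare against the standard representation of Kummer's U(a, b, z) in terms
# of 1F1 (Abramowitz & Stegun, ch. 13)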
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
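# derivative via the recurrence Jn'(x) = (J_{n-1}(x) - J_{n+1}(x))/2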
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*np.random.random() - 1
b = 5*np.random.random() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
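# heavy-tailed random orders and arguments with random signs; roughly 1/8 of the
# orders are rounded to integers to exercise the integer-order code paths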
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
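# direct power series I_v(z) = sum_k (z/2)**(v+2k) / (k! * Gamma(v+k+1)),
# evaluated in log space to avoid overflow; returns the sum and an error estimate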
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*np.random.random() - 0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
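# lmbda(v, x) returns the lambda functions Lambda_n(x) = Gamma(n+1)*(2/x)**n*J_n(x)
# for n = 0..v, together with their derivatives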
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # Q is not defined (broken), and the proper reporting order cannot be
        # determined, so nothing is asserted here
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
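        # NOTE: derrl is computed but never compared against pbv, so as written this
        # test only checks that pbdv evaluates without raising.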
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value
        assert_almost_equal(sy1,-377.52483,5)  # reference values computed previously
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
        assert_almost_equal(sy3,sphpy,4)  # compare against the analytically correct derivative value
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
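        # Terms of the power series H_v(z) = sum_k (-1)**k * (z/2)**(2k+v+1) /
        # (Gamma(k+1.5)*Gamma(k+v+1.5)), summed below with a rough roundoff estimate.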
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_ch2_inf():
assert_equal(special.chdtr(0.7,np.inf), 1.0)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
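        # Reference definition: entr(x) = -x*log(x) for x > 0, 0 at x = 0, -inf for x < 0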
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
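        # Reference definition: kl_div(x, y) = x*log(x/y) - x + y for x, y > 0, extended
        # to the boundary of the domain as handled by the branches below.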
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
forslund/mycroft-core | mycroft/session/__init__.py | 4 | 2407 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from threading import Lock
from uuid import uuid4
from mycroft.configuration import Configuration
from mycroft.util.log import LOG
class Session:
"""
    A class representing a Mycroft Session Identifier
"""
def __init__(self, session_id, expiration_seconds=180):
self.session_id = session_id
self.touch_time = int(time.time())
self.expiration_seconds = expiration_seconds
def touch(self):
"""
update the touch_time on the session
:return:
"""
self.touch_time = int(time.time())
def expired(self):
"""
determine if the session has expired
:return:
"""
return int(time.time()) - self.touch_time > self.expiration_seconds
def __str__(self):
return "{%s,%d}" % (str(self.session_id), self.touch_time)
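# Illustrative use of Session (an assumed typical pattern; SessionManager below
# wraps this logic with locking and configuration):
#     session = Session(str(uuid4()), expiration_seconds=180)
#     if session.expired():
#         session = Session(str(uuid4()))
#     else:
#         session.touch()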
class SessionManager:
""" Keeps track of the current active session. """
__current_session = None
__lock = Lock()
@staticmethod
def get():
"""
get the active session.
:return: An active session
"""
config = Configuration.get().get('session')
with SessionManager.__lock:
if (not SessionManager.__current_session or
SessionManager.__current_session.expired()):
SessionManager.__current_session = Session(
str(uuid4()), expiration_seconds=config.get('ttl', 180))
LOG.info(
"New Session Start: " +
SessionManager.__current_session.session_id)
return SessionManager.__current_session
@staticmethod
def touch():
"""
Update the last_touch timestamp on the current session
:return: None
"""
SessionManager.get().touch()
| apache-2.0 |
sbalde/edxplatform | openedx/core/djangoapps/user_api/migrations/0002_auto__add_usercoursetags__add_unique_usercoursetags_user_course_id_key.py | 114 | 5638 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserCourseTags'
db.create_table('user_api_usercoursetags', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('user_api', ['UserCourseTags'])
# Adding unique constraint on 'UserCourseTags', fields ['user', 'course_id', 'key']
db.create_unique('user_api_usercoursetags', ['user_id', 'course_id', 'key'])
def backwards(self, orm):
# Removing unique constraint on 'UserCourseTags', fields ['user', 'course_id', 'key']
db.delete_unique('user_api_usercoursetags', ['user_id', 'course_id', 'key'])
# Deleting model 'UserCourseTags'
db.delete_table('user_api_usercoursetags')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'user_api.usercoursetags': {
'Meta': {'unique_together': "(('user', 'course_id', 'key'),)", 'object_name': 'UserCourseTags'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'user_api.userpreference': {
'Meta': {'unique_together': "(('user', 'key'),)", 'object_name': 'UserPreference'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['user_api']
| agpl-3.0 |
vCentre/vFRP-6233 | frappe/desk/report_dump.py | 54 | 2841 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
import copy
@frappe.whitelist()
def get_data(doctypes, last_modified):
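	# doctypes: JSON-encoded list of report keys to dump; last_modified: JSON map of
	# doctype -> timestamp, used to return only rows modified since that time.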
data_map = {}
for dump_report_map in frappe.get_hooks().dump_report_map:
data_map.update(frappe.get_attr(dump_report_map))
import datetime
out = {}
doctypes = json.loads(doctypes)
last_modified = json.loads(last_modified)
start = datetime.datetime.now()
for d in doctypes:
args = copy.deepcopy(data_map[d])
dt = d.find("[") != -1 and d[:d.find("[")] or d
out[dt] = {}
if args.get("from"):
modified_table = "item."
else:
modified_table = ""
conditions = order_by = ""
table = args.get("from") or ("`tab%s`" % dt)
if d in last_modified:
if not args.get("conditions"):
args['conditions'] = []
args['conditions'].append(modified_table + "modified > '" + last_modified[d] + "'")
out[dt]["modified_names"] = frappe.db.sql_list("""select %sname from %s
where %smodified > %s""" % (modified_table, table, modified_table, "%s"), last_modified[d])
if args.get("force_index"):
conditions = " force index (%s) " % args["force_index"]
if args.get("conditions"):
conditions += " where " + " and ".join(args["conditions"])
if args.get("order_by"):
order_by = " order by " + args["order_by"]
out[dt]["data"] = [list(t) for t in frappe.db.sql("""select %s from %s %s %s""" \
% (",".join(args["columns"]), table, conditions, order_by))]
# last modified
modified_table = table
if "," in table:
modified_table = " ".join(table.split(",")[0].split(" ")[:-1])
tmp = frappe.db.sql("""select `modified`
from %s order by modified desc limit 1""" % modified_table)
out[dt]["last_modified"] = tmp and tmp[0][0] or ""
out[dt]["columns"] = map(lambda c: c.split(" as ")[-1], args["columns"])
if args.get("links"):
out[dt]["links"] = args["links"]
for d in out:
unused_links = []
# only compress full dumps (not partial)
if out[d].get("links") and (d not in last_modified):
for link_key in out[d]["links"]:
link = out[d]["links"][link_key]
if link[0] in out and (link[0] not in last_modified):
# make a map of link ids
# to index
link_map = {}
doctype_data = out[link[0]]
col_idx = doctype_data["columns"].index(link[1])
for row_idx in xrange(len(doctype_data["data"])):
row = doctype_data["data"][row_idx]
link_map[row[col_idx]] = row_idx
for row in out[d]["data"]:
col_idx = out[d]["columns"].index(link_key)
# replace by id
if row[col_idx]:
row[col_idx] = link_map.get(row[col_idx])
else:
unused_links.append(link_key)
for link in unused_links:
del out[d]["links"][link]
return out
| mit |
Shedino/SherpaHighLevel | catkin_ws/src/casy_rover/mavlink/pymavlink/build/scripts-2.7/magfit_delta.py | 2 | 4535 | #!/usr/bin/python
'''
fit best estimate of magnetometer offsets using the algorithm from
Bill Premerlani
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_delta.py [options]")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--condition",dest="condition", default=None, help="select packets by condition")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from pymavlink import mavutil
from pymavlink.rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_delta.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
'''find mag offsets by applying Bills "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
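    # Idea of the algorithm: with correct offsets the measured field magnitude is
    # constant, so any difference in |B| between two well-separated samples is fed
    # back (scaled by gain) into the offset estimate to drive that difference to zero.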
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
# equation 6 of Bills paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print ofs
return ofs
def magfit(logfile):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
data = []
mag = None
offsets = Vector3(0,0,0)
# now gather all the data
while True:
# get the next MAVLink message in the log
m = mlog.recv_match(condition=opts.condition)
if m is None:
break
if m.get_type() == "SENSOR_OFFSETS":
# update offsets that were used during this flight
offsets = Vector3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)
if m.get_type() == "RAW_IMU" and offsets != None:
# extract one mag vector, removing the offsets that were
# used during that flight to get the raw sensor values
mag = Vector3(m.xmag, m.ymag, m.zmag) - offsets
data.append(mag)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % offsets)
# run the fitting algorithm
ofs = offsets
ofs = Vector3(0,0,0)
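    # the in-flight offsets assigned above are immediately overridden; the fit is
    # started from a zero offset estimate instead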
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
| bsd-3-clause |
millen1m/flask-restplus-server-example | app/modules/auth/models.py | 1 | 3854 | # encoding: utf-8
"""
OAuth2 provider models.
It is based on the code from the example:
https://github.com/lepture/example-oauth2-server
More details are available here:
* http://flask-oauthlib.readthedocs.org/en/latest/oauth2.html
* http://lepture.com/en/2013/create-oauth-server
"""
import enum
from sqlalchemy_utils.types import ScalarListType
from app.extensions import db
from app.modules.users.models import User
class OAuth2Client(db.Model):
"""
Model that binds OAuth2 Client ID and Secret to a specific User.
"""
__tablename__ = 'oauth2_client'
client_id = db.Column(db.String(length=40), primary_key=True)
client_secret = db.Column(db.String(length=55), nullable=False)
user_id = db.Column(db.ForeignKey('user.id', ondelete='CASCADE'), index=True, nullable=False)
user = db.relationship('User')
class ClientTypes(str, enum.Enum):
public = 'public'
confidential = 'confidential'
client_type = db.Column(db.Enum(ClientTypes), default=ClientTypes.public, nullable=False)
redirect_uris = db.Column(ScalarListType(separator=' '), default=[], nullable=False)
default_scopes = db.Column(ScalarListType(separator=' '), nullable=False)
@property
def default_redirect_uri(self):
redirect_uris = self.redirect_uris
if redirect_uris:
return redirect_uris[0]
return None
@classmethod
def find(cls, client_id):
if not client_id:
return
return cls.query.get(client_id)
class OAuth2Grant(db.Model):
"""
Intermediate temporary helper for OAuth2 Grants.
"""
__tablename__ = 'oauth2_grant'
id = db.Column(db.Integer, primary_key=True) # pylint: disable=invalid-name
user_id = db.Column(db.ForeignKey('user.id', ondelete='CASCADE'), index=True, nullable=False)
user = db.relationship('User')
client_id = db.Column(
db.String(length=40),
db.ForeignKey('oauth2_client.client_id'),
index=True,
nullable=False,
)
client = db.relationship('OAuth2Client')
code = db.Column(db.String(length=255), index=True, nullable=False)
redirect_uri = db.Column(db.String(length=255), nullable=False)
expires = db.Column(db.DateTime, nullable=False)
scopes = db.Column(ScalarListType(separator=' '), nullable=False)
def delete(self):
db.session.delete(self)
db.session.commit()
return self
@classmethod
def find(cls, client_id, code):
return cls.query.filter_by(client_id=client_id, code=code).first()
class OAuth2Token(db.Model):
"""
OAuth2 Access Tokens storage model.
"""
__tablename__ = 'oauth2_token'
id = db.Column(db.Integer, primary_key=True) # pylint: disable=invalid-name
client_id = db.Column(
db.String(length=40),
db.ForeignKey('oauth2_client.client_id'),
index=True,
nullable=False,
)
client = db.relationship('OAuth2Client')
user_id = db.Column(db.ForeignKey('user.id', ondelete='CASCADE'), index=True, nullable=False)
user = db.relationship('User')
class TokenTypes(str, enum.Enum):
# currently only bearer is supported
Bearer = 'Bearer'
token_type = db.Column(db.Enum(TokenTypes), nullable=False)
access_token = db.Column(db.String(length=255), unique=True, nullable=False)
refresh_token = db.Column(db.String(length=255), unique=True, nullable=True)
expires = db.Column(db.DateTime, nullable=False)
scopes = db.Column(ScalarListType(separator=' '), nullable=False)
@classmethod
def find(cls, access_token=None, refresh_token=None):
if access_token:
return cls.query.filter_by(access_token=access_token).first()
elif refresh_token:
return cls.query.filter_by(refresh_token=refresh_token).first()
| mit |
nanolearningllc/edx-platform-cypress-2 | common/test/acceptance/tests/video/test_video_events.py | 50 | 13897 | """Ensure videos emit proper events"""
import datetime
import json
import ddt
import unittest
from ..helpers import EventsTestMixin
from .test_video_module import VideoBaseTest
from ...pages.lms.video.video import _parse_time_str
from openedx.core.lib.tests.assertions.events import assert_event_matches, assert_events_equal
from opaque_keys.edx.keys import UsageKey, CourseKey
class VideoEventsTestMixin(EventsTestMixin, VideoBaseTest):
"""
Useful helper methods to test video player event emission.
"""
def assert_payload_contains_ids(self, video_event):
"""
Video events should all contain "id" and "code" attributes in their payload.
This function asserts that those fields are present and have correct values.
"""
video_descriptors = self.course_fixture.get_nested_xblocks(category='video')
video_desc = video_descriptors[0]
video_locator = UsageKey.from_string(video_desc.locator)
expected_event = {
'event': {
'id': video_locator.html_id(),
'code': '3_yD_cEKoCk'
}
}
self.assert_events_match([expected_event], [video_event])
def assert_valid_control_event_at_time(self, video_event, time_in_seconds):
"""
Video control events should contain valid ID fields and a valid "currentTime" field.
This function asserts that those fields are present and have correct values.
"""
current_time = json.loads(video_event['event'])['currentTime']
self.assertAlmostEqual(current_time, time_in_seconds, delta=1)
def assert_field_type(self, event_dict, field, field_type):
"""Assert that a particular `field` in the `event_dict` has a particular type"""
self.assertIn(field, event_dict, '{0} not found in the root of the event'.format(field))
self.assertTrue(
isinstance(event_dict[field], field_type),
'Expected "{key}" to be a "{field_type}", but it has the value "{value}" of type "{t}"'.format(
key=field,
value=event_dict[field],
t=type(event_dict[field]),
field_type=field_type,
)
)
class VideoEventsTest(VideoEventsTestMixin):
""" Test video player event emission """
@unittest.skip('AN-5867')
def test_video_control_events(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
And I play the video
And I watch 5 seconds of it
And I pause the video
Then a "load_video" event is emitted
And a "play_video" event is emitted
And a "pause_video" event is emitted
"""
def is_video_event(event):
"""Filter out anything other than the video events of interest"""
return event['event_type'] in ('load_video', 'play_video', 'pause_video')
captured_events = []
with self.capture_events(is_video_event, number_of_matches=3, captured_events=captured_events):
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:05')
self.video.click_player_button('pause')
for idx, video_event in enumerate(captured_events):
self.assert_payload_contains_ids(video_event)
if idx == 0:
assert_event_matches({'event_type': 'load_video'}, video_event)
elif idx == 1:
assert_event_matches({'event_type': 'play_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
elif idx == 2:
assert_event_matches({'event_type': 'pause_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, self.video.seconds)
def test_strict_event_format(self):
"""
This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new
fields are not added to all events mistakenly. It should be the only existing test that is updated when new top
level fields are added to all events.
"""
captured_events = []
with self.capture_events(lambda e: e['event_type'] == 'load_video', captured_events=captured_events):
self.navigate_to_video()
load_video_event = captured_events[0]
# Validate the event payload
self.assert_payload_contains_ids(load_video_event)
# We cannot predict the value of these fields so we make weaker assertions about them
dynamic_string_fields = (
'accept_language',
'agent',
'host',
'ip',
'event',
'session'
)
for field in dynamic_string_fields:
self.assert_field_type(load_video_event, field, basestring)
self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field))
del load_video_event[field]
# A weak assertion for the timestamp as well
self.assert_field_type(load_video_event, 'time', datetime.datetime)
del load_video_event['time']
# Note that all unpredictable fields have been deleted from the event at this point
course_key = CourseKey.from_string(self.course_id)
static_fields_pattern = {
'context': {
'course_id': unicode(course_key),
'org_id': course_key.org,
'path': '/event',
'user_id': self.user_info['user_id']
},
'event_source': 'browser',
'event_type': 'load_video',
'username': self.user_info['username'],
'page': self.browser.current_url,
'referer': self.browser.current_url,
'name': 'load_video',
}
assert_events_equal(static_fields_pattern, load_video_event)
@ddt.ddt
class VideoBumperEventsTest(VideoEventsTestMixin):
""" Test bumper video event emission """
# helper methods
def watch_video_and_skip(self):
"""
Wait 5 seconds and press "skip" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('skip_bumper')
def watch_video_and_dismiss(self):
"""
Wait 5 seconds and press "do not show again" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('do_not_show_again')
def wait_for_state(self, state='finished'):
"""
Wait until video will be in given state.
Finished state means that video is played to the end.
"""
self.video.wait_for_state(state)
def add_bumper(self):
"""
Add video bumper to the course.
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.course_fixture.add_advanced_settings(additional_data)
@ddt.data(
('edx.video.bumper.skipped', watch_video_and_skip),
('edx.video.bumper.dismissed', watch_video_and_dismiss),
('edx.video.bumper.stopped', wait_for_state)
)
@ddt.unpack
def test_video_control_events(self, event_type, action):
"""
Scenario: Video component with pre-roll emits events correctly
Given the course has a Video component in "Youtube" mode with pre-roll enabled
And I click on the video poster
And the pre-roll video start playing
And I watch (5 seconds/5 seconds/to the end of) it
And I click (skip/do not show again) video button
Then a "edx.video.bumper.loaded" event is emitted
And a "edx.video.bumper.played" event is emitted
And a "edx.video.bumper.skipped/dismissed/stopped" event is emitted
And a "load_video" event is emitted
And a "play_video" event is emitted
"""
def is_video_event(event):
"""Filter out anything other than the video events of interest"""
return event['event_type'] in (
'edx.video.bumper.loaded',
'edx.video.bumper.played',
'edx.video.bumper.skipped',
'edx.video.bumper.dismissed',
'edx.video.bumper.stopped',
'load_video',
'play_video',
'pause_video'
) and self.video.state != 'buffering'
captured_events = []
self.add_bumper()
with self.capture_events(is_video_event, number_of_matches=5, captured_events=captured_events):
self.navigate_to_video_no_render()
self.video.click_on_poster()
self.video.wait_for_video_bumper_render()
sources, duration = self.video.sources[0], self.video.duration
action(self)
        # Filter out duplicate events caused by buffering: the bumper does not emit
        # a pause event, so repeated edx.video.bumper.played events are dropped from
        # the list, keeping only the first occurrence.
filtered_events = []
for video_event in captured_events:
is_played_event = video_event['event_type'] == 'edx.video.bumper.played'
appears_again = filtered_events and video_event['event_type'] == filtered_events[-1]['event_type']
if is_played_event and appears_again:
continue
filtered_events.append(video_event)
for idx, video_event in enumerate(filtered_events):
if idx < 3:
self.assert_bumper_payload_contains_ids(video_event, sources, duration)
else:
self.assert_payload_contains_ids(video_event)
if idx == 0:
assert_event_matches({'event_type': 'edx.video.bumper.loaded'}, video_event)
elif idx == 1:
assert_event_matches({'event_type': 'edx.video.bumper.played'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
elif idx == 2:
assert_event_matches({'event_type': event_type}, video_event)
elif idx == 3:
assert_event_matches({'event_type': 'load_video'}, video_event)
elif idx == 4:
assert_event_matches({'event_type': 'play_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
def assert_bumper_payload_contains_ids(self, video_event, sources, duration):
"""
Bumper video events should all contain "host_component_id", "bumper_id",
"duration", "code" attributes in their payload.
This function asserts that those fields are present and have correct values.
"""
self.add_bumper()
video_descriptors = self.course_fixture.get_nested_xblocks(category='video')
video_desc = video_descriptors[0]
video_locator = UsageKey.from_string(video_desc.locator)
expected_event = {
'event': {
'host_component_id': video_locator.html_id(),
'bumper_id': sources,
'duration': _parse_time_str(duration),
'code': 'html5'
}
}
self.assert_events_match([expected_event], [video_event])
def test_strict_event_format(self):
"""
This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new
fields are not added to all events mistakenly. It should be the only existing test that is updated when new top
level fields are added to all events.
"""
captured_events = []
self.add_bumper()
filter_event = lambda e: e['event_type'] == 'edx.video.bumper.loaded'
with self.capture_events(filter_event, captured_events=captured_events):
self.navigate_to_video_no_render()
self.video.click_on_poster()
load_video_event = captured_events[0]
# Validate the event payload
sources, duration = self.video.sources[0], self.video.duration
self.assert_bumper_payload_contains_ids(load_video_event, sources, duration)
# We cannot predict the value of these fields so we make weaker assertions about them
dynamic_string_fields = (
'accept_language',
'agent',
'host',
'ip',
'event',
'session'
)
for field in dynamic_string_fields:
self.assert_field_type(load_video_event, field, basestring)
self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field))
del load_video_event[field]
# A weak assertion for the timestamp as well
self.assert_field_type(load_video_event, 'time', datetime.datetime)
del load_video_event['time']
# Note that all unpredictable fields have been deleted from the event at this point
course_key = CourseKey.from_string(self.course_id)
static_fields_pattern = {
'context': {
'course_id': unicode(course_key),
'org_id': course_key.org,
'path': '/event',
'user_id': self.user_info['user_id']
},
'event_source': 'browser',
'event_type': 'edx.video.bumper.loaded',
'username': self.user_info['username'],
'page': self.browser.current_url,
'referer': self.browser.current_url,
'name': 'edx.video.bumper.loaded',
}
assert_events_equal(static_fields_pattern, load_video_event)
| agpl-3.0 |
40223112/w16test | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_discovery.py | 785 | 13838 | import os
import re
import sys
import unittest
class TestableTestProgram(unittest.TestProgram):
module = '__main__'
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test3.py', 'test4.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
loader.loadTestsFromModule = lambda module: module + ' tests'
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
expected = [name + ' module tests' for name in
('test1', 'test2')]
expected.extend([('test_dir.%s' % name) + ' module tests' for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return 'load_tests'
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
def loadTestsFromModule(module, use_load_tests):
if use_load_tests:
raise self.failureException('use_load_tests should be False for packages')
return module.path + ' module tests'
loader.loadTestsFromModule = loadTestsFromModule
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the test_directory package by calling load_tests
# and directly from the test_directory2 package
self.assertEqual(suite,
['load_tests', 'test_directory2' + ' module tests'])
self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, 'test_directory' + ' module tests', 'test*')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
def do_discovery(argv):
args.extend(argv)
program._do_discovery = do_discovery
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [])
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, ['foo', 'bar'])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
program.module = None
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, [])
program._do_discovery = do_discovery
program.parseArgs(['something'])
self.assertTrue(self.called)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
program.module = None
args = ['something', '-v', '-b', '-v', '-c', '-f']
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, args[1:])
program._do_discovery = do_discovery
program.parseArgs(args)
self.assertTrue(self.called)
def test_command_line_handling_do_discovery_too_many_arguments(self):
class Stop(Exception):
pass
def usageExit():
raise Stop
program = TestableTestProgram()
program.usageExit = usageExit
with self.assertRaises(Stop):
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def test_detect_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
battlemidget/conjure-up | conjureup/controllers/newcloud/gui.py | 1 | 6549 | import json
import os
import os.path as path
from functools import partial
from subprocess import check_output
import petname
from conjureup import async, controllers, juju, utils
from conjureup.api.models import model_info
from conjureup.app_config import app
from conjureup.models.provider import Schema
from conjureup.ui.views.newcloud import NewCloudView
from ubuntui.ev import EventLoop
from . import common
class NewCloudController:
def __init__(self):
self.cloud = None
def __handle_exception(self, exc):
utils.pollinate(app.session_id, 'EB')
return app.ui.show_exception_message(exc)
def __handle_bootstrap_done(self, future):
app.log.debug("handle bootstrap")
result = future.result()
if result.returncode < 0:
# bootstrap killed via user signal, we're quitting
return
if result.returncode > 0:
err = result.stderr.read().decode()
app.log.error(err)
            return self.__handle_exception(Exception("Error bootstrapping: {}".format(err)))
utils.pollinate(app.session_id, 'J004')
EventLoop.remove_alarms()
app.ui.set_footer('Bootstrap complete...')
self.__post_bootstrap_exec()
def __do_bootstrap(self, cloud=None, credential=None):
""" We call self in two seperate places so add self for clarity
"""
if cloud is None:
cloud = self.cloud
app.log.debug("Performing bootstrap: {} {}".format(
app.current_controller, cloud))
app.ui.set_footer('Bootstrapping Juju controller in the background...')
future = juju.bootstrap_async(
controller=app.current_controller,
cloud=cloud,
credential=credential,
exc_cb=self.__handle_exception)
app.bootstrap.running = future
future.add_done_callback(
self.__handle_bootstrap_done)
def __post_bootstrap_exec(self):
""" Executes post-bootstrap.sh if exists
"""
info = model_info(app.current_model)
# Set our provider type environment var so that it is
# exposed in future processing tasks
app.env['JUJU_PROVIDERTYPE'] = info['provider-type']
_post_bootstrap_sh = path.join(app.config['spell-dir'],
'steps/00_post-bootstrap')
app.log.debug(
'Checking for post bootstrap task: {}'.format(_post_bootstrap_sh))
if path.isfile(_post_bootstrap_sh) \
and os.access(_post_bootstrap_sh, os.X_OK):
app.ui.set_footer('Running post-bootstrap tasks...')
utils.pollinate(app.session_id, 'J001')
app.log.debug("post_bootstrap running: {}".format(
_post_bootstrap_sh
))
try:
future = async.submit(partial(check_output,
_post_bootstrap_sh,
shell=True,
env=app.env),
self.__handle_exception)
future.add_done_callback(self.__post_bootstrap_done)
except Exception as e:
return self.__handle_exception(e)
def __post_bootstrap_done(self, future):
try:
result = json.loads(future.result().decode('utf8'))
except Exception as e:
return self.__handle_exception(e)
app.log.debug("post_bootstrap_done: {}".format(result))
if result['returnCode'] > 0:
utils.pollinate(app.session_id, 'E001')
return self.__handle_exception(Exception(
'There was an error during the post '
'bootstrap processing phase: {}.'.format(result)))
utils.pollinate(app.session_id, 'J002')
app.ui.set_footer('')
controllers.use('deploy').render()
def finish(self, credentials=None, back=False):
""" Load the Model controller passing along the selected cloud.
Arguments:
credentials: credentials to store for provider
back: if true loads previous controller
"""
if back:
return controllers.use('clouds').render()
if credentials is not None:
common.save_creds(self.cloud, credentials)
credentials_key = common.try_get_creds(self.cloud)
if self.cloud == 'maas':
self.cloud = '{}/{}'.format(self.cloud,
credentials['@maas-server'].value)
utils.pollinate(app.session_id, 'CA')
self.__do_bootstrap(credential=credentials_key)
return controllers.use('deploy').render()
def render(self, cloud):
""" Render
Arguments:
cloud: The cloud to create credentials for
"""
self.cloud = cloud
if app.current_controller is None:
app.current_controller = petname.Name()
if app.current_model is None:
app.current_model = 'conjure-up'
# LXD is a special case as we want to make sure a bridge
# is configured. If not we'll bring up a new view to allow
# a user to configure a LXD bridge with suggested network
# information.
if self.cloud == 'localhost':
if not utils.check_bridge_exists():
return controllers.use('lxdsetup').render()
app.log.debug("Found an IPv4 address, "
"assuming LXD is configured.")
self.__do_bootstrap()
return controllers.use('deploy').render()
# XXX: always prompt for maas information for now as there is no way to
# logically store the maas server ip for future sessions.
if common.try_get_creds(self.cloud) \
is not None and self.cloud != 'maas':
self.__do_bootstrap(credential=common.try_get_creds(self.cloud))
return controllers.use('deploy').render()
# show credentials editor otherwise
try:
creds = Schema[self.cloud]
except KeyError as e:
utils.pollinate(app.session_id, 'EC')
return app.ui.show_exception_message(e)
view = NewCloudView(app,
self.cloud,
creds,
self.finish)
app.ui.set_header(
title="New cloud setup",
)
app.ui.set_body(view)
app.ui.set_footer("")
_controller_class = NewCloudController
| mit |
olgabrani/synnefo | snf-cyclades-app/synnefo/userdata/util.py | 9 | 1974 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import binascii
from synnefo.userdata.asn1 import DerObject, DerSequence
def exportKey(keyobj, format='PEM'):
"""Export the RSA key. A string is returned
with the encoded public or the private half
under the selected format.
format: 'DER' (PKCS#1) or 'PEM' (RFC1421)
"""
der = DerSequence()
if keyobj.has_private():
keyType = "RSA PRIVATE"
der[:] = [ 0, keyobj.n, keyobj.e, keyobj.d, keyobj.p, keyobj.q,
keyobj.d % (keyobj.p-1), keyobj.d % (keyobj.q-1),
keyobj.u ]
else:
keyType = "PUBLIC"
der.append('\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00')
bitmap = DerObject('BIT STRING')
derPK = DerSequence()
derPK[:] = [ keyobj.n, keyobj.e ]
bitmap.payload = '\x00' + derPK.encode()
der.append(bitmap.encode())
if format=='DER':
return der.encode()
if format=='PEM':
pem = "-----BEGIN %s KEY-----\n" % keyType
binaryKey = der.encode()
# Each BASE64 line can take up to 64 characters (=48 bytes of data)
chunks = [ binascii.b2a_base64(binaryKey[i:i+48]) for i in range(0, len(binaryKey), 48) ]
pem += ''.join(chunks)
pem += "-----END %s KEY-----" % keyType
return pem
return ValueError("")
| gpl-3.0 |
sernaleon/charlie | Android/www/Blockly/i18n/create_messages.py | 18 | 5189 | #!/usr/bin/python
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google Inc.
# https://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
synonym_text = '\n'.join(['Blockly.Msg.{0} = Blockly.Msg.{1};'.format(
key, synonym_defs[key]) for key in synonym_defs])
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print('WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.{0}');
goog.require('Blockly.Msg');
""".format(target_lang))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(
key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print('These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print('These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
if not args.quiet:
print('Created {0}.'.format(outname))
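# Illustrative invocation sketch, not part of the original script. The target
# fr.json path is a hypothetical example; the source/synonym paths are simply the
# argparse defaults. Since main() parses sys.argv, a programmatic call can patch it.
def _run_example():
    sys.argv = ['create_messages.py',
                '--source_lang_file', os.path.join('json', 'en.json'),
                '--source_synonym_file', os.path.join('json', 'synonyms.json'),
                '--output_dir', 'js/',
                os.path.join('json', 'fr.json')]
    main()  # would write js/fr.js with lines like: Blockly.Msg.<KEY> = "<value>";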
if __name__ == '__main__':
main()
| apache-2.0 |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/django/views/decorators/csrf.py | 120 | 2841 | import warnings
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import decorator_from_middleware, available_attrs
from functools import wraps
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_response_exempt(view_func):
"""
Modifies a view function so that its response is exempt
from the post-processing of the CSRF middleware.
"""
warnings.warn("csrf_response_exempt is deprecated. It no longer performs a "
"function, and calls to it can be removed.",
DeprecationWarning)
return view_func
def csrf_view_exempt(view_func):
"""
Marks a view function as being exempt from CSRF view protection.
"""
warnings.warn("csrf_view_exempt is deprecated. Use csrf_exempt instead.",
DeprecationWarning)
return csrf_exempt(view_func)
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
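# Illustrative usage sketch, not part of Django itself: how project views might apply
# these decorators. The view names are hypothetical; HttpResponse is Django's own.
def _csrf_decorator_examples():
    from django.http import HttpResponse

    @csrf_exempt
    def webhook(request):
        # CSRF protection is skipped entirely for this view.
        return HttpResponse("ok")

    @ensure_csrf_cookie
    def spa_entry_point(request):
        # Forces the CSRF cookie to be set even if no form/template tag is rendered.
        return HttpResponse("ok")

    return webhook, spa_entry_point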
| mit |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/python/ops/sparse_ops.py | 21 | 65964 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation. See the @{python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
It is assumed that each inputs is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
dimensions will be expand to be the largest among all inputs, and it is the
sum of the inputs sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaulted to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:] if axis == -1 else
shape[axis:axis + 1], [] if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
happens only for a positive value.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype, name="thresh")
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(
a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape,
thresh))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops._sparse_tensor_dense_add(
a.indices, a.values, a.dense_shape, b)
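# A minimal usage sketch (illustrative, not part of the original module), assuming the
# usual TF 1.x graph-construction workflow: sparse + sparse yields a SparseTensor,
# sparse + dense yields a dense Tensor, and `thresh` drops near-zero sums.
def _sparse_add_example():
    a = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
    b = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1]], values=[-1.0, 3.0], dense_shape=[2, 2])
    sparse_sum = sparse_add(a, b, thresh=0.1)  # the 1.0 + -1.0 = 0.0 entry is dropped
    dense_sum = sparse_add(a, array_ops.ones([2, 2]))  # returns a dense Tensor
    return sparse_sum, dense_sum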
def sparse_dense_cwise_add(sp_t, dense_t):
"""Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By the rules, the result is a logical SparseTensor with exactly the same
indices and shape, but possibly with different non-zero values. The output of
this Op is the resultant non-zero values.
Args:
sp_t: the SparseTensor operand.
dense_t: the dense Tensor operand; must have the same dtype and a
broadcast-compatible shape as `sp_t`.
Returns:
output: the SparseTensor output.
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.dense_shape, dense_t)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (gen_sparse_ops._sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
return sparse_tensor.SparseTensor(reordered_ind, reordered_val,
array_ops.identity(sp_input.dense_shape))
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
return sparse_tensor.SparseTensor(
reshaped_ind, array_ops.identity(sp_input.values),
reshaped_shape)
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
def __repr__(self):
# This is needed to make documentation without fully qualified module paths
return "KeywordRequired()"
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None, num_split=None, axis=None,
name=None, split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`
each slice starting from 0:`shape[axis] % num_split` gets extra one
dimension. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
Args:
keyword_required: Python 2 standin for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (gen_sparse_ops._sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(
output_inds[i], output_vals[i], output_shapes[i]))
return sparse_tensors
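# Keyword-only call sketch (illustrative, not part of the original module): because of
# the KeywordRequired sentinel above, sp_input/num_split/axis must be passed by name.
def _sparse_split_example():
    sp = sparse_tensor.SparseTensor(
        indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
        values=['a', 'd', 'e', 'b', 'c'], dense_shape=[2, 7])
    # Splits the [2, 7] tensor along columns into shapes [2, 4] and [2, 3].
    return sparse_split(sp_input=sp, num_split=2, axis=1)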
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops._sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
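# A small illustrative sketch, not part of the original module: scatter two values
# into a 3x4 matrix, filling everything else with the default.
def _sparse_to_dense_example():
    dense = sparse_to_dense(
        sparse_indices=[[0, 0], [2, 3]],  # row-major order, no repeats
        output_shape=[3, 4],
        sparse_values=[5, 7],
        default_value=0)
    # dense evaluates to [[5, 0, 0, 0],
    #                     [0, 0, 0, 0],
    #                     [0, 0, 0, 7]]
    return dense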
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values,
sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes),
keep_dims)
def sparse_reduce_sum_sparse(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values,
sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis,
reduction_axes),
keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
Indices must be without repeats. This is only
tested if validate_indices is True.
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(
sp_input.indices, new_values, sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher-dimensions by simply providing a list for
both the sp_ids as well as the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
sp_ids: A single `SparseTensor` with `values` property of type `int32`
or `int64` or a Python list of such `SparseTensor`s or a list thereof.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
`sp_values` are already sorted. If so skip sorting, False by default
(optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
`Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
`vocab_size` is not a list of `Tensor`s or Python ints and `sp_ids` is a list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or
isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
and `to_retain = [True, False, False, True]`, then the output will
be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
[0, 1]: a
[3, 1]: d
Args:
sp_input: The input `SparseTensor` with `N` non-empty elements.
to_retain: A bool vector of length `N` with `M` true values.
Returns:
A `SparseTensor` with the same shape as the input and `M` non-empty
elements corresponding to the true positions in `to_retain`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
to_retain = ops.convert_to_tensor(to_retain)
# Shape checking, if shape is known at graph construction time
retain_shape = to_retain.get_shape()
retain_shape.assert_has_rank(1)
sp_input.values.get_shape()[0].merge_with(retain_shape[0])
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.dense_shape))
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
If `new_shape` is None, returns a copy of `sp_input` with its shape reset
to the tight bounding box of `sp_input`.
If `new_shape` is provided, then it must be larger or equal in all dimensions
compared to the shape of `sp_input`. When this condition is met, the returned
SparseTensor will have its shape reset to `new_shape` and its indices and
values unchanged from that of `sp_input.`
For example:
Consider a `sp_input` with shape [2, 3, 5]:
[0, 0, 1]: a
[0, 1, 0]: b
[0, 2, 2]: c
[1, 0, 3]: d
- It is an error to set `new_shape` as [3, 7] since this represents a
rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
during graph construction (if both shapes are known) or an OpError during
run time.
- Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
equal in every dimension compared to the original shape [2, 3, 5].
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The
third dimension is smaller than the original shape [2, 3, 5] (and an
`InvalidArgumentError` will be raised).
- If `new_shape` is None, the returned SparseTensor will have a shape
[2, 3, 4], which is the tight bounding box of `sp_input`.
Args:
sp_input: The input `SparseTensor`.
new_shape: None or a vector representing the new shape for the returned
`SparseTensor`.
Returns:
A `SparseTensor` indices and values unchanged from `input_sp`. Its shape is
`new_shape` if that is set. Otherwise it is the tight bounding box of
`input_sp`
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If `new_shape` represents a tensor with a different rank from
that of `sp_input` (if shapes are known when graph is constructed).
OpError:
- If `new_shape` has dimension sizes that are too small.
- If shapes are not known during graph construction time, and during run
time it is found out that the ranks do not match.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
in_indices = array_ops.identity(sp_input.indices)
in_values = array_ops.identity(sp_input.values)
in_shape = array_ops.identity(sp_input.dense_shape)
if new_shape is None:
dim_low_bound = math_ops.reduce_max(in_indices, 0)
output_shape_tensor = math_ops.add(dim_low_bound,
array_ops.ones_like(in_shape))
else:
output_shape_tensor = ops.convert_to_tensor(new_shape)
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
# For cases where shape is not known during graph construction.
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_equal(
array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))],
output_shape_tensor)
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
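# Illustrative sketch, not part of the original module: shrink to the tight bounding
# box versus growing to an explicitly larger shape.
def _sparse_reset_shape_example():
    sp = sparse_tensor.SparseTensor(
        indices=[[0, 1], [2, 3]], values=[1, 2], dense_shape=[5, 10])
    tight = sparse_reset_shape(sp)                      # dense_shape becomes [3, 4]
    grown = sparse_reset_shape(sp, new_shape=[6, 12])   # every dim >= original, so OK
    return tight, grown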
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
This op adds entries with the specified `default_value` at index
`[row, 0]` for any row in the input that does not already have a value.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
Note that the input may have empty columns at the end, with no effect on
this op.
The output `SparseTensor` will be in row-major order and will have the
same shape as the input.
This op also returns an indicator vector such that
empty_row_indicator[i] = True iff row i was an empty row.
Args:
sp_input: A `SparseTensor` with shape `[N, M]`.
default_value: The value to fill for empty rows, with the same type as
`sp_input.`
name: A name prefix for the returned tensors (optional)
Returns:
sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
rows filled in with `default_value`.
empty_row_indicator: A bool vector of length `N` indicating whether each
input row was empty.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
default_value = ops.convert_to_tensor(
default_value, dtype=sp_input.values.dtype)
num_rows = math_ops.cast(sp_input.dense_shape[0], dtypes.int32)
all_row_indices = math_ops.cast(math_ops.range(num_rows), dtypes.int64)
empty_row_indices, _ = array_ops.setdiff1d(all_row_indices,
sp_input.indices[:, 0])
empty_row_indicator = sparse_to_dense(
empty_row_indices,
array_ops.expand_dims(sp_input.dense_shape[0], -1), True,
False)
empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])
additional_indices = array_ops.concat([
empty_row_indices_as_column,
array_ops.zeros_like(empty_row_indices_as_column)
], 1)
additional_values = array_ops.fill(
array_ops.shape(empty_row_indices), default_value)
all_indices_unordered = array_ops.concat(
[sp_input.indices, additional_indices], 0)
all_values_unordered = array_ops.concat(
[sp_input.values, additional_values], 0)
sp_unordered_output = sparse_tensor.SparseTensor(
all_indices_unordered,
all_values_unordered, sp_input.dense_shape)
sp_ordered_output = sparse_reorder(sp_unordered_output)
return sp_ordered_output, empty_row_indicator
def serialize_sparse(sp_input, name=None):
"""Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
Returns:
A string 3-vector (1D `Tensor`), with each column representing the
serialized `SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._serialize_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)
def serialize_many_sparse(sp_input, name=None):
"""Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `3` columns.
Each column represents serialized `SparseTensor`'s indices, values, and
shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._serialize_many_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`serialize_sparse`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops._deserialize_many_sparse(
serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
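# Illustrative round-trip sketch, not part of the original module: serialize a
# minibatch SparseTensor row by row, then reassemble it, passing `rank` so the static
# shape information is restored.
def _serialize_roundtrip_example():
    sp = sparse_tensor.SparseTensor(
        indices=[[0, 1], [1, 0]], values=[10, 20], dense_shape=[2, 3])
    serialized = serialize_many_sparse(sp)  # string Tensor of shape [2, 3]
    return deserialize_many_sparse(serialized, dtype=dtypes.int32, rank=2)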
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
adjoint_b=False,
name=None):
# pylint: disable=line-too-long
"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of A. However, the following
input format is recommended for optimal behavior:
if adjoint_a == false:
A should be sorted in lexicographically increasing order. Use
sparse_reorder if you're not sure.
if adjoint_a == true:
A should be sorted in order of increasing dimension 1 (i.e., "column major"
order instead of "row major" order).
Deciding when to use sparse_tensor_dense_matmul vs. matmul(a_is_sparse=True):
There are a number of questions to ask in the decision process, including:
* Will the SparseTensor A fit in memory if densified?
* Is the column count of the product large (>> 1)?
* Is the density of A larger than approximately 15%?
If the answer to several of these questions is yes, consider
converting the `SparseTensor` to a dense one and using `tf.matmul` with
`a_is_sparse=True`.
This operation tends to perform well when A is more sparse, if the column size
of the product is small (e.g. matrix-vector multiplication), and if
`sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between sparse_tensor_dense_matmul,
labelled 'sparse', and matmul(a_is_sparse=True), labelled 'dense'. For purposes of
the comparison, the time spent converting from a SparseTensor to a dense
Tensor is not included, so it is overly conservative with respect to
the time ratio.
Benchmark system:
CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
GPU: NVidia Tesla k40c
Compiled with:
`-c opt --config=cuda --copt=-mavx`
```
tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
A sparse [m, k] with % nonzero values between 1% and 80%
B dense [k, n]
% nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)
0.01 1 True 100 100 0.000221166 0.00010154 0.459112
0.01 1 True 100 1000 0.00033858 0.000109275 0.322745
0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385
0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669
0.01 1 False 100 100 0.000208085 0.000107603 0.51711
0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762
0.01 1 False 1000 100 0.000308222 0.00010345 0.335635
0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124
0.01 10 True 100 100 0.000218522 0.000105537 0.482958
0.01 10 True 100 1000 0.000340882 0.000111641 0.327506
0.01 10 True 1000 100 0.000315472 0.000117376 0.372064
0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128
0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354
0.01 10 False 100 1000 0.000330552 0.000112615 0.340687
0.01 10 False 1000 100 0.000341277 0.000114097 0.334324
0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549
0.01 25 True 100 100 0.000207806 0.000105977 0.509981
0.01 25 True 100 1000 0.000322879 0.00012921 0.400181
0.01 25 True 1000 100 0.00038262 0.00014158 0.370035
0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504
0.01 25 False 100 100 0.000209401 0.000104696 0.499979
0.01 25 False 100 1000 0.000321161 0.000130737 0.407076
0.01 25 False 1000 100 0.000377012 0.000136801 0.362856
0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413
0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833
0.2 1 True 100 1000 0.000348674 0.000147475 0.422959
0.2 1 True 1000 100 0.000336908 0.00010122 0.300439
0.2 1 True 1000 1000 0.001022 0.000203274 0.198898
0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746
0.2 1 False 100 1000 0.000356127 0.000146824 0.41228
0.2 1 False 1000 100 0.000322664 0.000100918 0.312764
0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648
0.2 10 True 100 100 0.000211692 0.000109903 0.519165
0.2 10 True 100 1000 0.000372819 0.000164321 0.440753
0.2 10 True 1000 100 0.000338651 0.000144806 0.427596
0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064
0.2 10 False 100 100 0.000215727 0.000110502 0.512231
0.2 10 False 100 1000 0.000375419 0.0001613 0.429653
0.2 10 False 1000 100 0.000336999 0.000145628 0.432132
0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618
0.2 25 True 100 100 0.000218705 0.000129913 0.594009
0.2 25 True 100 1000 0.000394794 0.00029428 0.745402
0.2 25 True 1000 100 0.000404483 0.0002693 0.665788
0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052
0.2 25 False 100 100 0.000221494 0.0001306 0.589632
0.2 25 False 100 1000 0.000396436 0.000297204 0.74969
0.2 25 False 1000 100 0.000409346 0.000270068 0.659754
0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046
0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836
0.5 1 True 100 1000 0.000415328 0.000223073 0.537101
0.5 1 True 1000 100 0.000358324 0.00011269 0.314492
0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851
0.5 1 False 100 100 0.000224196 0.000101423 0.452386
0.5 1 False 100 1000 0.000400987 0.000223286 0.556841
0.5 1 False 1000 100 0.000368825 0.00011224 0.304318
0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563
0.5 10 True 100 100 0.000222125 0.000112308 0.505608
0.5 10 True 100 1000 0.000461088 0.00032357 0.701753
0.5 10 True 1000 100 0.000394624 0.000225497 0.571422
0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801
0.5 10 False 100 100 0.000232083 0.000114978 0.495418
0.5 10 False 100 1000 0.000454574 0.000324632 0.714146
0.5 10 False 1000 100 0.000379097 0.000227768 0.600817
0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638
0.5 25 True 100 100 0.00023429 0.000151703 0.647501
0.5 25 True 100 1000 0.000497462 0.000598873 1.20386
0.5 25 True 1000 100 0.000460778 0.000557038 1.20891
0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845
0.5 25 False 100 100 0.000228981 0.000155334 0.678371
0.5 25 False 100 1000 0.000496139 0.000620789 1.25124
0.5 25 False 1000 100 0.00045473 0.000551528 1.21287
0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927
0.8 1 True 100 100 0.000222037 0.000105301 0.47425
0.8 1 True 100 1000 0.000410804 0.000329327 0.801664
0.8 1 True 1000 100 0.000349735 0.000131225 0.375212
0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633
0.8 1 False 100 100 0.000214079 0.000107486 0.502085
0.8 1 False 100 1000 0.000413746 0.000323244 0.781261
0.8 1 False 1000 100 0.000348983 0.000131983 0.378193
0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282
0.8 10 True 100 100 0.000229159 0.00011825 0.516017
0.8 10 True 100 1000 0.000498845 0.000532618 1.0677
0.8 10 True 1000 100 0.000383126 0.00029935 0.781336
0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689
0.8 10 False 100 100 0.000230783 0.000124958 0.541452
0.8 10 False 100 1000 0.000493393 0.000550654 1.11606
0.8 10 False 1000 100 0.000377167 0.000298581 0.791642
0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024
0.8 25 True 100 100 0.000233496 0.000175241 0.75051
0.8 25 True 100 1000 0.00055654 0.00102658 1.84458
0.8 25 True 1000 100 0.000463814 0.000783267 1.68875
0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132
0.8 25 False 100 100 0.000240243 0.000175047 0.728625
0.8 25 False 100 1000 0.000578102 0.00104499 1.80763
0.8 25 False 1000 100 0.000485113 0.000776849 1.60138
0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992
```
Args:
sp_a: SparseTensor A, of rank 2.
b: A dense Matrix with the same dtype as sp_a.
adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
this is transpose(conj(A)). Otherwise it's transpose(A).
adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
this is transpose(conj(B)). Otherwise it's transpose(B).
name: A name prefix for the returned tensors (optional)
Returns:
A dense matrix (pseudo-code in dense np.matrix notation):
A = A.H if adjoint_a else A
B = B.H if adjoint_b else B
return A*B
"""
# pylint: enable=line-too-long
sp_a = _convert_to_sparse_tensor(sp_a)
with ops.name_scope(name, "SparseTensorDenseMatMul",
[sp_a.indices, sp_a.values, b]) as name:
b = ops.convert_to_tensor(b, name="b")
return gen_sparse_ops._sparse_tensor_dense_mat_mul(
a_indices=sp_a.indices,
a_values=sp_a.values,
a_shape=sp_a.dense_shape,
b=b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
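# Added note (illustrative sketch, not part of the original module): a minimal
# usage example for the guidance in the docstring above.  The literals are
# invented; only names already available in this module are used
# (sparse_tensor.SparseTensor, array_ops.ones, sparse_tensor_dense_matmul).
#
#   # A: 3x4 sparse matrix with two nonzeros; B: dense 4x1 matrix.
#   sp_a = sparse_tensor.SparseTensor(indices=[[0, 1], [2, 3]],
#                                     values=[1.0, 2.0],
#                                     dense_shape=[3, 4])
#   b = array_ops.ones([4, 1])
#   product = sparse_tensor_dense_matmul(sp_a, b)  # dense Tensor, shape [3, 1]
#
# If A were dense enough (roughly >15% nonzero) or the product had many
# columns, densifying A with sparse_tensor_to_dense() and calling tf.matmul
# would typically win, per the benchmark above.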
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
(where `N >= 2`), and with indices sorted in the canonical lexicographic
order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each
innermost logical submatrix with shape `[B, C]`, but with the catch that *the
implicitly zero elements do not participate*. Specifically, the algorithm is
equivalent to:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost
submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and
shape.
Example:
```python
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
shape = [2, 2, 2] # 3-D SparseTensor
values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
indices = np.vstack(np.where(values)).astype(np.int64).T
result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
# ...returning a 3-D SparseTensor, equivalent to:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
```
Args:
sp_input: N-D `SparseTensor`, where `N >= 2`.
name: optional name of the operation.
Returns:
output: N-D `SparseTensor` representing the results.
"""
with ops.name_scope(name, "SparseSoftmax",
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.dense_shape)
return sparse_tensor.SparseTensor(
sp_input.indices, out_vals, sp_input.dense_shape)
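# Added note (illustrative sketch, not part of the original module): the three
# steps in the sparse_softmax docstring, written out in NumPy-style pseudocode
# for a single [B, C] submatrix.  `values` and `mask` are hypothetical arrays;
# this is an equivalence sketch, not the actual kernel implementation.
#
#   masked = np.where(mask, values, -np.inf)   # implicit zeros -> -inf
#   exps = np.exp(masked - masked.max(axis=-1, keepdims=True))
#   probs = exps / exps.sum(axis=-1, keepdims=True)
#
# Entries where `mask` is False come out as exactly 0, so only the originally
# present indices carry probability mass -- masking and renormalizing in one go.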
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(name, "SparseSparseMaximum", [sp_a.indices, sp_a.values,
sp_b.indices,
sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(name, "SparseSparseMinimum", [sp_a.indices, sp_a.values,
sp_b.indices,
sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[5, 4]` and
`indices` / `values`:
[0, 2]: c
[1, 0]: a
[1, 3]: d
[3, 0]: b
Args:
sp_input: The input `SparseTensor`.
perm: A permutation of the dimensions of `sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
A transposed `SparseTensor`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
with ops.op_scope([sp_input], name, "SparseTranspose") as name:
if perm is None:
rank = array_ops.rank(sp_input)
perm = (rank - 1) - math_ops.range(0, rank, 1)
indices = sp_input.indices
transposed_indices = array_ops.transpose(
array_ops.gather(array_ops.transpose(indices), perm))
dense_shape = sp_input.dense_shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values,
transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
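# Added note (illustrative sketch, not part of the original module):
# sparse_transpose only permutes the index columns and the dense_shape; the
# values tensor is reused as-is.  A small example with invented literals:
#
#   sp = sparse_tensor.SparseTensor(indices=[[0, 3], [2, 1]],
#                                   values=[10.0, 20.0],
#                                   dense_shape=[4, 5])
#   sp_t = sparse_transpose(sp)               # shape [5, 4]; perm defaults to (1, 0)
#   sp_p = sparse_transpose(sp, perm=[1, 0])  # same result, explicit permutation
#
# The final sparse_reorder() call above restores lexicographic index order,
# which most downstream sparse ops expect.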
def _add_sparse_to_tensors_map(sp_input, container=None,
shared_name=None, name=None):
"""Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
Args:
sp_input: The input `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
    A string 1-vector (1D `Tensor`), with the single element representing
    a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._add_sparse_to_tensors_map(
sp_input.indices, sp_input.values, sp_input.dense_shape,
container=container, shared_name=shared_name, name=name)
def _add_many_sparse_to_tensors_map(sp_input, container=None,
shared_name=None, name=None):
"""Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorMap` underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._add_many_sparse_to_tensors_map(
sp_input.indices, sp_input.values, sp_input.dense_shape,
container=container, shared_name=shared_name, name=name)
def _take_many_sparse_from_tensors_map(
sparse_map_op, sparse_handles, rank=None, name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError("sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." %
sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops._take_many_sparse_from_tensors_map(
sparse_handles, dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
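# Added note (hypothetical sketch, not part of the original module): a round
# trip through the two tensors-map helpers above, following their docstrings.
# Shapes and literals are invented for illustration.
#
#   # Minibatch of two rank-1 SparseTensors packed as one rank-2 SparseTensor.
#   sp_batch = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
#                                         values=[1.0, 2.0],
#                                         dense_shape=[2, 50])
#   handles = _add_many_sparse_to_tensors_map(sp_batch)
#   restored = _take_many_sparse_from_tensors_map(
#       sparse_map_op=handles.op, sparse_handles=handles, rank=2)
#   # `restored` has dense_shape [2, 50]; run sparse_reorder() on it if the
#   # original minibatch indices were not already in lexicographic order.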
| apache-2.0 |
AuyaJackie/odoo | addons/account_asset/wizard/wizard_asset_compute.py | 382 | 2518 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class asset_depreciation_confirmation_wizard(osv.osv_memory):
_name = "asset.depreciation.confirmation.wizard"
_description = "asset.depreciation.confirmation.wizard"
_columns = {
'period_id': fields.many2one('account.period', 'Period', required=True, help="Choose the period for which you want to automatically post the depreciation lines of running assets"),
}
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
return False
_defaults = {
'period_id': _get_period,
}
def asset_compute(self, cr, uid, ids, context):
ass_obj = self.pool.get('account.asset.asset')
asset_ids = ass_obj.search(cr, uid, [('state','=','open')], context=context)
data = self.browse(cr, uid, ids, context=context)
period_id = data[0].period_id.id
created_move_ids = ass_obj._compute_entries(cr, uid, asset_ids, period_id, context=context)
return {
'name': _('Created Asset Moves'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'domain': "[('id','in',["+','.join(map(str,created_move_ids))+"])]",
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
martynovp/edx-platform | lms/djangoapps/django_comment_client/tests/test_middleware.py | 134 | 1878 | import django.http
from django.test import TestCase
from nose.plugins.attrib import attr
import json
import lms.lib.comment_client
import django_comment_client.middleware as middleware
@attr('shard_1')
class AjaxExceptionTestCase(TestCase):
def setUp(self):
super(AjaxExceptionTestCase, self).setUp()
self.a = middleware.AjaxExceptionMiddleware()
self.request1 = django.http.HttpRequest()
self.request0 = django.http.HttpRequest()
self.exception1 = lms.lib.comment_client.CommentClientRequestError('{}', 401)
self.exception2 = lms.lib.comment_client.CommentClientRequestError('Foo!', 404)
self.exception0 = lms.lib.comment_client.CommentClient500Error("Holy crap the server broke!")
self.request1.META['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
self.request0.META['HTTP_X_REQUESTED_WITH'] = "SHADOWFAX"
def test_process_exception(self):
response1 = self.a.process_exception(self.request1, self.exception1)
self.assertIsInstance(response1, middleware.JsonError)
self.assertEqual(self.exception1.status_code, response1.status_code)
self.assertEqual(
{"errors": json.loads(self.exception1.message)},
json.loads(response1.content)
)
response2 = self.a.process_exception(self.request1, self.exception2)
self.assertIsInstance(response2, middleware.JsonError)
self.assertEqual(self.exception2.status_code, response2.status_code)
self.assertEqual(
{"errors": [self.exception2.message]},
json.loads(response2.content)
)
self.assertIsNone(self.a.process_exception(self.request1, self.exception0))
self.assertIsNone(self.a.process_exception(self.request0, self.exception1))
self.assertIsNone(self.a.process_exception(self.request0, self.exception0))
| agpl-3.0 |
darrowco/quality | node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
| mit |
MeteorAdminz/autopep8 | test/suite/utf-8.py | 55 | 2557 | # -*- coding: utf-8 -*-
class Rectangle(Blob):
def __init__(self, width, height,
color='black', emphasis=None, highlight=0):
if width == 0 and height == 0 and \
color == 'red' and emphasis == 'strong' or \
highlight > 100:
raise ValueError("sorry, you lose")
if width == 0 and height == 0 and (color == 'red' or
emphasis is None):
raise ValueError("I don't think so -- values are %s, %s" %
(width, height))
Blob.__init__(self, width, height,
color, emphasis, highlight)
# Some random text with multi-byte characters (utf-8 encoded)
#
# Εδώ μάτσο κειμένων τη, τρόπο πιθανό διευθυντές ώρα μη. Νέων απλό παράγει ροή
# κι, το επί δεδομένη καθορίζουν. Πάντως ζητήσεις περιβάλλοντος ένα με, τη
# ξέχασε αρπάζεις φαινόμενο όλη. Τρέξει εσφαλμένη χρησιμοποίησέ νέα τι. Θα όρο
# πετάνε φακέλους, άρα με διακοπής λαμβάνουν εφαμοργής. Λες κι μειώσει
# καθυστερεί.
# 79 narrow chars
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [79]
# 78 narrow chars (Na) + 1 wide char (W)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情
# 3 narrow chars (Na) + 40 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
# 3 narrow chars (Na) + 76 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
#
#: E501
# 80 narrow chars (Na)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [80]
#
#: E501
# 78 narrow chars (Na) + 2 wide char (W)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情情
#
#: E501
# 3 narrow chars (Na) + 77 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
#
| mit |
guschmue/tensorflow | tensorflow/python/keras/_impl/keras/layers/core_test.py | 4 | 7222 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
class CoreLayersTest(test.TestCase):
def test_masking(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_dropout(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
with self.test_session():
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
with self.test_session():
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
with self.test_session():
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
with self.test_session():
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
def test_activation(self):
# with string argument
with self.test_session():
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
with self.test_session():
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
with self.test_session():
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
with self.test_session():
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
with self.test_session():
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_permute(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_flatten(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
def test_repeat_vector(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_lambda(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
with self.test_session():
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([keras.backend.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
def test_dense(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
with self.test_session():
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
# Test regularization
with self.test_session():
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
# Test constraints
with self.test_session():
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_eager_dense(self):
with context.eager_mode():
l = keras.layers.Dense(units=3,
kernel_initializer=init_ops.zeros_initializer())
self.assertAllEqual(l(constant_op.constant([[1.0]])), [[0., 0., 0.]])
def test_activity_regularization(self):
with self.test_session():
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
_ = layer.get_config()
if __name__ == '__main__':
test.main()
| apache-2.0 |
helldorado/ansible | lib/ansible/plugins/strategy/host_pinned.py | 75 | 1963 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
strategy: host_pinned
short_description: Executes tasks on each host without interruption
description:
- Task execution is as fast as possible per host in batch as defined by C(serial) (default all).
Ansible will not start a play for a host unless the play can be finished without interruption by tasks for another host,
i.e. the number of hosts with an active play does not exceed the number of forks.
Ansible will not wait for other hosts to finish the current task before queuing the next task for a host that has finished.
      Once a host is done with the play, it opens its slot to a new host that was waiting to start.
Other than that, it behaves just like the "free" strategy.
version_added: "2.7"
author: Ansible Core Team
'''
from ansible.plugins.strategy.free import StrategyModule as FreeStrategyModule
from ansible.utils.display import Display
display = Display()
class StrategyModule(FreeStrategyModule):
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = True
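# Added usage note (not part of the original plugin): the strategy is selected
# per play, e.g. in a playbook
#
#   - hosts: webservers
#     strategy: host_pinned
#     serial: 10
#     tasks:
#       - ping:
#
# or globally via the [defaults] section of ansible.cfg with
# `strategy = host_pinned`.  The host group and serial value above are
# illustrative only.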
| gpl-3.0 |
heri/openaccess | browser-ext/third_party/firefox-addon-sdk/python-lib/cuddlefish/tests/test_property_parser.py | 37 | 3063 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cuddlefish.property_parser import parse, MalformedLocaleFileError
class TestParser(unittest.TestCase):
def test_parse(self):
lines = [
# Comments are striped only if `#` is the first non-space character
"sharp=#can be in value",
"# comment",
"#key=value",
" # comment2",
"keyWithNoValue=",
"valueWithSpaces= ",
"valueWithMultilineSpaces= \\",
" \\",
" ",
# All spaces before/after are striped
" key = value ",
"key2=value2",
# Keys can contain '%'
"%s key=%s value",
# Accept empty lines
"",
" ",
# Multiline string must use backslash at end of lines
"multi=line\\", "value",
# With multiline string, left spaces are stripped ...
"some= spaces\\", " are\\ ", " stripped ",
# ... but not right spaces, except the last line!
"but=not \\", "all of \\", " them ",
# Explicit [other] plural definition
"explicitPlural[one] = one",
"explicitPlural[other] = other",
# Implicit [other] plural definition
"implicitPlural[one] = one",
"implicitPlural = other", # This key is the [other] one
]
# Ensure that all lines end with a `\n`
# And that strings are unicode ones (parser code relies on it)
lines = [unicode(l + "\n") for l in lines]
pairs = parse(lines)
expected = {
"sharp": "#can be in value",
"key": "value",
"key2": "value2",
"%s key": "%s value",
"keyWithNoValue": "",
"valueWithSpaces": "",
"valueWithMultilineSpaces": "",
"multi": "linevalue",
"some": "spacesarestripped",
"but": "not all of them",
"implicitPlural": {
"one": "one",
"other": "other"
},
"explicitPlural": {
"one": "one",
"other": "other"
},
}
self.assertEqual(pairs, expected)
def test_exceptions(self):
self.failUnlessRaises(MalformedLocaleFileError, parse,
["invalid line with no key value"])
self.failUnlessRaises(MalformedLocaleFileError, parse,
["plural[one]=plural with no [other] value"])
self.failUnlessRaises(MalformedLocaleFileError, parse,
["multiline with no last empty line=\\"])
self.failUnlessRaises(MalformedLocaleFileError, parse,
["=no key"])
self.failUnlessRaises(MalformedLocaleFileError, parse,
[" =only spaces in key"])
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
erebus/erebus | erebus/util/__init__.py | 1 | 2167 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Erebus, a web dashboard for tor relays.
#
# :copyright: (c) 2015, The Tor Project, Inc.
# (c) 2015, Damian Johnson
# (c) 2015, Cristobal Leiva
#
# :license: See LICENSE for licensing information.
"""
General purpose utilities.
"""
import os
import sys
import stem.connection
import stem.util.conf
import stem.util.enum
import stem.util.log
BASE_DIR = os.path.sep.join(__file__.split(os.path.sep)[:-2])
DUAL_MODE, TESTING = False, False
TOR_CONTROLLER, LOG_HANDLER = None, None
try:
uses_settings = stem.util.conf.uses_settings(
'erebus', os.path.join(BASE_DIR, 'config'), lazy_load=False)
except IOError as exc:
print("Unable to load rwsd's internal configurations: %s" % exc)
sys.exit(1)
def tor_controller():
"""
Provides the TOR_CONTROLLER singleton.
:returns: :class:`~stem.control.Controller`
"""
return TOR_CONTROLLER
def init_tor_controller(*args, **kwargs):
"""
Initializes the tor controller instance. This is a passthrough for
Stem's :func:`~stem.connection.connect` function.
:returns: :class:`~stem.control.Controller`
"""
global TOR_CONTROLLER
TOR_CONTROLLER = stem.connection.connect(*args, **kwargs)
return TOR_CONTROLLER
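# Added usage note (not part of the original module): init_tor_controller()
# forwards straight to stem.connection.connect(), so the usual stem keyword
# arguments apply.  A hypothetical call against a local control port:
#
#   controller = init_tor_controller(control_port=('127.0.0.1', 9051))
#   if controller:
#       version = controller.get_version()
#
# Later callers retrieve the same instance through tor_controller().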
@uses_settings
def msg(message, config, **attr):
"""
Provides the given message read from strings config file.
:param str message: message handle to log
:param dict attr: attributes to format the message with
:returns: **str** that was requested
"""
try:
return config.get('msg.%s' % message).format(**attr)
except:
msg = 'BUG: We attempted to use an undefined string \
resource (%s)' % message
if TESTING:
raise ValueError(msg)
stem.util.log.notice(msg)
return ''
def dual_mode():
"""
Whether to run erebus with server or client functionalities, or both.
:returns: **bool** answer
"""
return DUAL_MODE
def set_dual_mode():
"""
Sets DUAL_MODE to True.
"""
global DUAL_MODE
DUAL_MODE = True
| bsd-3-clause |
aYukiSekiguchi/ACCESS-Chromium | third_party/tlslite/tlslite/utils/OpenSSL_RSAKey.py | 359 | 5014 | """OpenSSL/M2Crypto RSA implementation."""
from cryptomath import *
from RSAKey import *
from Python_RSAKey import Python_RSAKey
#copied from M2Crypto.util.py, so when we load the local copy of m2
#we can still use it
def password_callback(v, prompt1='Enter private key passphrase:',
prompt2='Verify passphrase:'):
from getpass import getpass
while 1:
try:
p1=getpass(prompt1)
if v:
p2=getpass(prompt2)
if p1==p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1
if m2cryptoLoaded:
class OpenSSL_RSAKey(RSAKey):
def __init__(self, n=0, e=0):
self.rsa = None
self._hasPrivateKey = False
if (n and not e) or (e and not n):
raise AssertionError()
if n and e:
self.rsa = m2.rsa_new()
m2.rsa_set_n(self.rsa, numberToMPI(n))
m2.rsa_set_e(self.rsa, numberToMPI(e))
def __del__(self):
if self.rsa:
m2.rsa_free(self.rsa)
def __getattr__(self, name):
if name == 'e':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_e(self.rsa))
elif name == 'n':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_n(self.rsa))
else:
raise AttributeError
def hasPrivateKey(self):
return self._hasPrivateKey
def hash(self):
return Python_RSAKey(self.n, self.e).hash()
def _rawPrivateKeyOp(self, m):
s = numberToString(m)
byteLength = numBytes(self.n)
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
c = stringToNumber(m2.rsa_private_encrypt(self.rsa, s,
m2.no_padding))
return c
def _rawPublicKeyOp(self, c):
s = numberToString(c)
byteLength = numBytes(self.n)
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
m = stringToNumber(m2.rsa_public_decrypt(self.rsa, s,
m2.no_padding))
return m
def acceptsPassword(self): return True
def write(self, password=None):
bio = m2.bio_new(m2.bio_s_mem())
if self._hasPrivateKey:
if password:
def f(v): return password
m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f)
else:
def f(): pass
m2.rsa_write_key_no_cipher(self.rsa, bio, f)
else:
if password:
raise AssertionError()
m2.rsa_write_pub_key(self.rsa, bio)
s = m2.bio_read(bio, m2.bio_ctrl_pending(bio))
m2.bio_free(bio)
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = OpenSSL_RSAKey()
def f():pass
key.rsa = m2.rsa_generate_key(bits, 3, f)
key._hasPrivateKey = True
return key
generate = staticmethod(generate)
def parse(s, passwordCallback=None):
if s.startswith("-----BEGIN "):
if passwordCallback==None:
callback = password_callback
else:
def f(v, prompt1=None, prompt2=None):
return passwordCallback()
callback = f
bio = m2.bio_new(m2.bio_s_mem())
try:
m2.bio_write(bio, s)
key = OpenSSL_RSAKey()
if s.startswith("-----BEGIN RSA PRIVATE KEY-----"):
def f():pass
key.rsa = m2.rsa_read_key(bio, callback)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = True
elif s.startswith("-----BEGIN PUBLIC KEY-----"):
key.rsa = m2.rsa_read_pub_key(bio)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = False
else:
raise SyntaxError()
return key
finally:
m2.bio_free(bio)
else:
raise SyntaxError()
parse = staticmethod(parse)
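# Added usage note (hypothetical sketch, not part of the original module): with
# M2Crypto available, a private key can be generated, serialized to PEM and
# parsed back using only the methods defined above.
#
#   key = OpenSSL_RSAKey.generate(2048)   # public exponent 3, per generate()
#   pem = key.write()                     # unencrypted "BEGIN RSA PRIVATE KEY" PEM
#   same_key = OpenSSL_RSAKey.parse(pem)
#   assert same_key.hasPrivateKey()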
| bsd-3-clause |
wangjun/xunlei-lixian | lixian_commands/delete.py | 10 | 1239 |
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_config import get_config
from lixian_encoding import default_encoding
from lixian_colors import colors
import lixian_help
import lixian_query
@command_line_parser(help=lixian_help.delete)
@with_parser(parse_login)
@with_parser(parse_colors)
@with_parser(parse_logging)
@command_line_option('i')
@command_line_option('all')
@command_line_option('failed')
@command_line_value('limit', default=get_config('limit'))
@command_line_value('page-size', default=get_config('page-size'))
def delete_task(args):
client = create_client(args)
to_delete = lixian_query.search_tasks(client, args)
if not to_delete:
print 'Nothing to delete'
return
with colors(args.colors).red.bold():
print "Below files are going to be deleted:"
for x in to_delete:
print x['name'].encode(default_encoding)
if args.i:
		yes_or_no = raw_input('Are you sure you want to delete them from Xunlei cloud? (y/n) ')
while yes_or_no.lower() not in ('y', 'yes', 'n', 'no'):
yes_or_no = raw_input('yes or no? ')
if yes_or_no.lower() in ('y', 'yes'):
pass
elif yes_or_no.lower() in ('n', 'no'):
print 'Deletion abort per user request.'
return
client.delete_tasks(to_delete)
| mit |
thunderace/newtifry | appengine/mako/ext/extract.py | 60 | 3882 | import re
from mako import compat
from mako import lexer
from mako import parsetree
class MessageExtractor(object):
def process_file(self, fileobj):
template_node = lexer.Lexer(
fileobj.read(),
input_encoding=self.config['encoding']).parse()
for extracted in self.extract_nodes(template_node.get_children()):
yield extracted
def extract_nodes(self, nodes):
translator_comments = []
in_translator_comments = False
comment_tags = list(
filter(None, re.split(r'\s+', self.config['comment-tags'])))
for node in nodes:
child_nodes = None
if in_translator_comments and \
isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.BlockTag):
code = node.body_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.CallNamespaceTag):
code = node.expression
child_nodes = node.nodes
elif isinstance(node, parsetree.ControlLine):
if node.isend:
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
continue
            # Comments don't apply unless they immediately precede the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
translator_strings = [
comment[1] for comment in translator_comments]
if isinstance(code, compat.text_type):
code = code.encode('ascii', 'backslashreplace')
used_translator_comments = False
code = compat.byte_buffer(code)
for message in self.process_python(
code, node.lineno, translator_strings):
yield message
used_translator_comments = True
if used_translator_comments:
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in self.extract_nodes(child_nodes):
yield extracted
@staticmethod
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
| apache-2.0 |
171121130/SWI | venv/Lib/site-packages/odf/element.py | 2 | 21290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
# Note: This script has copied a lot of text from xml.dom.minidom.
# Whatever license applies to that file also applies to this file.
#
import sys, os.path
sys.path.append(os.path.dirname(__file__))
import xml.dom
from xml.dom.minicompat import *
from odf.namespaces import nsdict
import odf.grammar as grammar
from odf.attrconverters import AttrConverters
if sys.version_info[0] == 3:
unicode=str # unicode function does not exist
# The following code is pasted from xml.sax.saxutils
# It makes it possible to run the code without the xml sax package installed
# To make it possible to have <rubbish> in your text elements, it is necessary to escape the texts
def _escape(data, entities={}):
""" Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("&", "&")
data = data.replace("<", "<")
data = data.replace(">", ">")
for chars, entity in entities.items():
data = data.replace(chars, entity)
return data
def _quoteattr(data, entities={}):
""" Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities['\n']=' '
entities['\r']=''
data = _escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
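# Added note (not part of the original module): a short illustration of the two
# helpers above, with invented input values.
#
#   _escape(u'a < b & c')    ->  u'a &lt; b &amp; c'
#   _quoteattr(u'plain')     ->  u'"plain"'
#   _quoteattr(u'say "hi"')  ->  u'\'say "hi"\''  (single-quoted because the
#                                                  value contains double quotes)
#
# _quoteattr() also maps newlines to '&#10;' and strips carriage returns before
# quoting, which keeps attribute values on a single line.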
def _nssplit(qualifiedName):
""" Split a qualified name into namespace part and local part. """
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _nsassign(namespace):
return nsdict.setdefault(namespace,"ns" + str(len(nsdict)))
# Exceptions
class IllegalChild(Exception):
""" Complains if you add an element to a parent where it is not allowed """
class IllegalText(Exception):
""" Complains if you add text or cdata to an element where it is not allowed """
class Node(xml.dom.Node):
""" super class for more specific nodes """
parentNode = None
nextSibling = None
previousSibling = None
def hasChildNodes(self):
""" Tells whether this element has any children; text nodes,
subelements, whatever.
"""
if self.childNodes:
return True
else:
return False
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
""" Inserts the node newChild before the existing child node refChild.
If refChild is null, insert newChild at the end of the list of children.
"""
if newChild.nodeType not in self._child_node_types:
raise IllegalChild( "%s cannot be child of %s" % (newChild.tagName, self.tagName))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, newChild):
""" Adds the node newChild to the end of the list of children of this node.
If the newChild is already in the tree, it is first removed.
"""
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise IllegalChild( "<%s> is not allowed in %s" % ( newChild.tagName, self.tagName))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
_append_child(self, newChild)
newChild.nextSibling = None
return newChild
def removeChild(self, oldChild):
""" Removes the child node indicated by oldChild from the list of children, and returns it.
"""
#FIXME: update ownerDocument.element_dict or find other solution
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if self.ownerDocument:
self.ownerDocument.clear_caches()
oldChild.parentNode = None
return oldChild
def __str__(self):
val = []
for c in self.childNodes:
val.append(str(c))
return ''.join(val)
def __unicode__(self):
val = []
for c in self.childNodes:
val.append(unicode(c))
return u''.join(val)
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.__dict__["previousSibling"] = last
last.__dict__["nextSibling"] = node
childNodes.append(node)
node.__dict__["parentNode"] = self
class Childless:
""" Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
""" Raises an error """
raise xml.dom.HierarchyRequestErr(
self.tagName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
""" Raises an error """
raise xml.dom.HierarchyRequestErr(
self.tagName + " nodes do not have children")
def removeChild(self, oldChild):
""" Raises an error """
raise xml.dom.NotFoundErr(
self.tagName + " nodes do not have children")
def replaceChild(self, newChild, oldChild):
""" Raises an error """
raise xml.dom.HierarchyRequestErr(
self.tagName + " nodes do not have children")
class Text(Childless, Node):
nodeType = Node.TEXT_NODE
tagName = "Text"
def __init__(self, data):
self.data = data
def __str__(self):
return self.data
def __unicode__(self):
return self.data
def toXml(self,level,f):
""" Write XML in UTF-8 """
if self.data:
f.write(_escape(unicode(self.data)))
class CDATASection(Text, Childless):
nodeType = Node.CDATA_SECTION_NODE
def toXml(self,level,f):
""" Generate XML output of the node. If the text contains "]]>", then
escape it by going out of CDATA mode (]]>), then write the string
and then go into CDATA mode again. (<![CDATA[)
"""
if self.data:
f.write('<![CDATA[%s]]>' % self.data.replace(']]>',']]>]]><![CDATA['))
class Element(Node):
""" Creates a arbitrary element and is intended to be subclassed not used on its own.
This element is the base of every element it defines a class which resembles
a xml-element. The main advantage of this kind of implementation is that you don't
have to create a toXML method for every different object. Every element
consists of an attribute, optional subelements, optional text and optional cdata.
"""
nodeType = Node.ELEMENT_NODE
namespaces = {} # Due to shallow copy this is a static variable
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, attributes=None, text=None, cdata=None, qname=None, qattributes=None, check_grammar=True, **args):
if qname is not None:
self.qname = qname
assert(hasattr(self, 'qname'))
self.ownerDocument = None
self.childNodes=[]
self.allowed_children = grammar.allowed_children.get(self.qname)
prefix = self.get_nsprefix(self.qname[0])
self.tagName = prefix + ":" + self.qname[1]
if text is not None:
self.addText(text)
if cdata is not None:
self.addCDATA(cdata)
allowed_attrs = self.allowed_attributes()
if allowed_attrs is not None:
allowed_args = [ a[1].lower().replace('-','') for a in allowed_attrs]
self.attributes={}
# Load the attributes from the 'attributes' argument
if attributes:
for attr, value in attributes.items():
self.setAttribute(attr, value)
# Load the qualified attributes
if qattributes:
for attr, value in qattributes.items():
self.setAttrNS(attr[0], attr[1], value)
if allowed_attrs is not None:
# Load the attributes from the 'args' argument
for arg in args.keys():
self.setAttribute(arg, args[arg])
else:
for arg in args.keys(): # If any attribute is allowed
self.attributes[arg]=args[arg]
if not check_grammar:
return
# Test that all mandatory attributes have been added.
required = grammar.required_attributes.get(self.qname)
if required:
for r in required:
if self.getAttrNS(r[0],r[1]) is None:
raise AttributeError( "Required attribute missing: %s in <%s>" % (r[1].lower().replace('-',''), self.tagName))
def get_knownns(self, prefix):
""" Odfpy maintains a list of known namespaces. In some cases a prefix is used, and
we need to know which namespace it resolves to.
"""
global nsdict
for ns,p in nsdict.items():
if p == prefix: return ns
return None
def get_nsprefix(self, namespace):
""" Odfpy maintains a list of known namespaces. In some cases we have a namespace URL,
and needs to look up or assign the prefix for it.
"""
if namespace is None: namespace = ""
prefix = _nsassign(namespace)
if not namespace in self.namespaces:
self.namespaces[namespace] = prefix
return prefix
def allowed_attributes(self):
return grammar.allowed_attributes.get(self.qname)
def _setOwnerDoc(self, element):
element.ownerDocument = self.ownerDocument
for child in element.childNodes:
self._setOwnerDoc(child)
def addElement(self, element, check_grammar=True):
""" adds an element to an Element
Element.addElement(Element)
"""
if check_grammar and self.allowed_children is not None:
if element.qname not in self.allowed_children:
raise IllegalChild( "<%s> is not allowed in <%s>" % ( element.tagName, self.tagName))
self.appendChild(element)
self._setOwnerDoc(element)
if self.ownerDocument:
self.ownerDocument.rebuild_caches(element)
def addText(self, text, check_grammar=True):
""" Adds text to an element
Setting check_grammar=False turns off grammar checking
"""
if check_grammar and self.qname not in grammar.allows_text:
raise IllegalText( "The <%s> element does not allow text" % self.tagName)
else:
if text != '':
self.appendChild(Text(text))
def addCDATA(self, cdata, check_grammar=True):
""" Adds CDATA to an element
Setting check_grammar=False turns off grammar checking
"""
if check_grammar and self.qname not in grammar.allows_text:
raise IllegalText( "The <%s> element does not allow text" % self.tagName)
else:
self.appendChild(CDATASection(cdata))
def removeAttribute(self, attr, check_grammar=True):
""" Removes an attribute by name. """
allowed_attrs = self.allowed_attributes()
if allowed_attrs is None:
if type(attr) == type(()):
prefix, localname = attr
self.removeAttrNS(prefix, localname)
else:
raise AttributeError( "Unable to add simple attribute - use (namespace, localpart)")
else:
# Construct a list of allowed arguments
allowed_args = [ a[1].lower().replace('-','') for a in allowed_attrs]
if check_grammar and attr not in allowed_args:
raise AttributeError( "Attribute %s is not allowed in <%s>" % ( attr, self.tagName))
i = allowed_args.index(attr)
self.removeAttrNS(allowed_attrs[i][0], allowed_attrs[i][1])
def setAttribute(self, attr, value, check_grammar=True):
""" Add an attribute to the element
This is sort of a convenience method. All attributes in ODF have
namespaces. The library knows what attributes are legal and then allows
the user to provide the attribute as a keyword argument and the
library will add the correct namespace.
        Overwrites the attribute if it already exists.
"""
allowed_attrs = self.allowed_attributes()
if allowed_attrs is None:
if type(attr) == type(()):
prefix, localname = attr
self.setAttrNS(prefix, localname, value)
else:
raise AttributeError( "Unable to add simple attribute - use (namespace, localpart)")
else:
# Construct a list of allowed arguments
allowed_args = [ a[1].lower().replace('-','') for a in allowed_attrs]
if check_grammar and attr not in allowed_args:
raise AttributeError( "Attribute %s is not allowed in <%s>" % ( attr, self.tagName))
i = allowed_args.index(attr)
self.setAttrNS(allowed_attrs[i][0], allowed_attrs[i][1], value)
def setAttrNS(self, namespace, localpart, value):
""" Add an attribute to the element
        In case you need to add an attribute the library doesn't know about,
        you must provide the fully qualified name.
        It will not check that the attribute is legal according to the schema.
        Overwrites the attribute if it already exists.
"""
allowed_attrs = self.allowed_attributes()
prefix = self.get_nsprefix(namespace)
# if allowed_attrs and (namespace, localpart) not in allowed_attrs:
# raise AttributeError( "Attribute %s:%s is not allowed in element <%s>" % ( prefix, localpart, self.tagName))
c = AttrConverters()
self.attributes[(namespace, localpart)] = c.convert((namespace, localpart), value, self)
def getAttrNS(self, namespace, localpart):
"""
gets an attribute, given a namespace and a key
@param namespace a unicode string or a bytes: the namespace
@param localpart a unicode string or a bytes:
the key to get the attribute
        @return an attribute as a unicode string or a bytes: if both parameters
        are byte strings, it will be a bytes; if both parameters are
        unicode strings, it will be a unicode string
"""
prefix = self.get_nsprefix(namespace)
result = self.attributes.get((namespace, localpart))
        # namespace and localpart should be either both byte strings or both
        # unicode strings; the original tuple-based assert never actually checked this
        assert (
            (type(namespace) == type(localpart) == type(b"")) or
            (type(namespace) == type(localpart) == type(u""))
        )
return result
def removeAttrNS(self, namespace, localpart):
del self.attributes[(namespace, localpart)]
def getAttribute(self, attr):
""" Get an attribute value. The method knows which namespace the attribute is in
"""
allowed_attrs = self.allowed_attributes()
if allowed_attrs is None:
if type(attr) == type(()):
prefix, localname = attr
return self.getAttrNS(prefix, localname)
else:
raise AttributeError( "Unable to get simple attribute - use (namespace, localpart)")
else:
# Construct a list of allowed arguments
allowed_args = [ a[1].lower().replace('-','') for a in allowed_attrs]
i = allowed_args.index(attr)
return self.getAttrNS(allowed_attrs[i][0], allowed_attrs[i][1])
def write_open_tag(self, level, f):
f.write(('<'+self.tagName))
if level == 0:
for namespace, prefix in self.namespaces.items():
f.write(u' xmlns:' + prefix + u'="'+ _escape(str(namespace))+'"')
for qname in self.attributes.keys():
prefix = self.get_nsprefix(qname[0])
f.write(u' '+_escape(str(prefix+u':'+qname[1]))+u'='+_quoteattr(unicode(self.attributes[qname])))
f.write(u'>')
def write_close_tag(self, level, f):
f.write('</'+self.tagName+'>')
def toXml(self, level, f):
"""
Generate an XML stream out of the tree structure
@param level integer: level in the XML tree; zero at root of the tree
@param f an open writable file able to accept unicode strings
"""
f.write(u'<'+self.tagName)
if level == 0:
for namespace, prefix in self.namespaces.items():
f.write(u' xmlns:' + prefix + u'="'+ _escape(str(namespace))+u'"')
for qname in self.attributes.keys():
prefix = self.get_nsprefix(qname[0])
f.write(u' '+_escape(unicode(prefix+':'+qname[1]))+u'='+_quoteattr(unicode(self.attributes[qname])))
if self.childNodes:
f.write(u'>')
for element in self.childNodes:
element.toXml(level+1,f)
f.write(u'</'+self.tagName+'>')
else:
f.write(u'/>')
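    # Illustrative sketch of serialising a tree with toXml() (io.StringIO is only an
    # example; any file-like object that accepts unicode strings will do):
    #
    #   import io
    #   buf = io.StringIO()
    #   root.toXml(0, buf)        # level 0 also emits the xmlns:* declarations
    #   xml_text = buf.getvalue()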
def _getElementsByObj(self, obj, accumulator):
if self.qname == obj.qname:
accumulator.append(self)
for e in self.childNodes:
if e.nodeType == Node.ELEMENT_NODE:
accumulator = e._getElementsByObj(obj, accumulator)
return accumulator
    def getElementsByType(self, element):
        """ Gets elements based on the type, which is a function from text.py, draw.py etc. """
obj = element(check_grammar=False)
return self._getElementsByObj(obj,[])
def isInstanceOf(self, element):
""" This is a check to see if the object is an instance of a type """
obj = element(check_grammar=False)
return self.qname == obj.qname
| mit |
jef-n/QGIS | python/pyplugin_installer/version_compare.py | 30 | 7602 | """
/***************************************************************************
Plugin Installer module
Plugin version comparison functions
-------------------
Date : 2008-11-24
Copyright : (C) 2008 by Borys Jurgiel
Email : info at borysjurgiel dot pl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Here is a Python function for comparing version numbers. It's case-insensitive
and recognizes all major notations, prefixes (ver. and version), delimiters
(. - and _) and suffixes (alpha, beta, rc, preview and trunk).
Usage: compareVersions(version1, version2)
The function accepts arguments of any type convertible to a Unicode string
and returns an integer value:
0 - the versions are equal
1 - version 1 is higher
2 - version 2 is higher
-----------------------------------------------------------------------------
HOW DOES IT WORK...
First, both arguments are converted to uppercase Unicode and stripped of the
'VERSION' or 'VER.' prefix. Then they are chopped into a list of individual
numeric and alphabetic elements. Dots, dashes and underscores are recognized
as delimiters, and numbers and non-numbers are separated. See the example below:
'Ver 0.03-120_rc7foo' is converted to ['0','03','120','RC','7','FOO']
Then every pair of elements, from left to right, is compared as a string
or as a number to provide the best result (you know, 11>9 but also '03'>'007').
The comparison stops when one of the elements is greater. If the comparison reaches
the end of the shorter list and the matter is still unresolved, the longer
list is usually recognized as higher, except for the following suffixes:
ALPHA, BETA, RC, PREVIEW and TRUNK, which make the version number lower.
"""
from builtins import str
from builtins import range
from qgis.core import Qgis
import re
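# A few illustrative calls of compareVersions(), with results that follow from the
# rules described in the module docstring above (worked examples, not test data):
#
#   compareVersions('1.2', '1.2.0')       # -> 2: the longer list usually wins
#   compareVersions('1.2', '1.2rc1')      # -> 1: the RC suffix makes the longer version lower
#   compareVersions('ver. 0.03', 'V0.3')  # -> 2: prefixes stripped, '3' beats '03' in the string comparison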
# ------------------------------------------------------------------------ #
def normalizeVersion(s):
""" remove possible prefix from given string and convert to uppercase """
prefixes = ['VERSION', 'VER.', 'VER', 'V.', 'V', 'REVISION', 'REV.', 'REV', 'R.', 'R']
if not s:
return str()
s = str(s).upper()
for i in prefixes:
if s[:len(i)] == i:
s = s.replace(i, '')
s = s.strip()
return s
# ------------------------------------------------------------------------ #
def classifyCharacter(c):
""" return 0 for delimiter, 1 for digit and 2 for alphabetic character """
if c in [".", "-", "_", " "]:
return 0
if c.isdigit():
return 1
else:
return 2
# ------------------------------------------------------------------------ #
def chopString(s):
""" convert string to list of numbers and words """
l = [s[0]]
for i in range(1, len(s)):
if classifyCharacter(s[i]) == 0:
pass
elif classifyCharacter(s[i]) == classifyCharacter(s[i - 1]):
l[len(l) - 1] += s[i]
else:
l += [s[i]]
return l
# ------------------------------------------------------------------------ #
def compareElements(s1, s2):
""" compare two particular elements """
# check if the matter is easy solvable:
if s1 == s2:
return 0
# try to compare as numeric values (but only if the first character is not 0):
if s1 and s2 and s1.isnumeric() and s2.isnumeric() and s1[0] != '0' and s2[0] != '0':
if float(s1) == float(s2):
return 0
elif float(s1) > float(s2):
return 1
else:
return 2
    # if the strings aren't numeric or start from 0, compare them as strings:
# but first, set ALPHA < BETA < PREVIEW < RC < TRUNK < [NOTHING] < [ANYTHING_ELSE]
if s1 not in ['ALPHA', 'BETA', 'PREVIEW', 'RC', 'TRUNK']:
s1 = 'Z' + s1
if s2 not in ['ALPHA', 'BETA', 'PREVIEW', 'RC', 'TRUNK']:
s2 = 'Z' + s2
# the final test:
if s1 > s2:
return 1
else:
return 2
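# Illustrative worked examples of the suffix ordering set up above:
#
#   compareElements('BETA', 'RC')  # -> 2: RC is considered higher than BETA
#   compareElements('RC', '7')     # -> 2: '7' gets the 'Z' prefix, so a plain number beats a suffix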
# ------------------------------------------------------------------------ #
def compareVersions(a, b):
""" Compare two version numbers. Return 0 if a==b or error, 1 if a>b and 2 if b>a """
if not a or not b:
return 0
a = normalizeVersion(a)
b = normalizeVersion(b)
if a == b:
return 0
# convert the strings to lists
v1 = chopString(a)
v2 = chopString(b)
# set the shorter string as a base
l = len(v1)
if l > len(v2):
l = len(v2)
# try to determine within the common length
for i in range(l):
if compareElements(v1[i], v2[i]):
return compareElements(v1[i], v2[i])
    # if the lists are identical till the end of the shorter string, try to compare the odd tail
    # with the simple space (because the 'alpha', 'beta', 'preview' and 'rc' are LESS than nothing)
if len(v1) > l:
return compareElements(v1[l], u' ')
if len(v2) > l:
return compareElements(u' ', v2[l])
# if everything else fails...
if a > b:
return 1
else:
return 2
"""
COMPARE CURRENT QGIS VERSION WITH qgisMinimumVersion AND qgisMaximumVersion
ALLOWED FORMATS ARE: major.minor OR major.minor.bugfix, where each segment must be 0..99
"""
def splitVersion(s):
""" split string into 2 or 3 numerical segments """
if not s or type(s) != str:
return None
l = str(s).split('.')
for c in l:
if not c.isnumeric():
return None
if int(c) > 99:
return None
if len(l) not in [2, 3]:
return None
return l
def isCompatible(curVer, minVer, maxVer):
""" Compare current QGIS version with qgisMinVersion and qgisMaxVersion """
if not minVer or not curVer or not maxVer:
return False
minVer = splitVersion(re.sub(r'[^0-9.]+', '', minVer))
maxVer = splitVersion(re.sub(r'[^0-9.]+', '', maxVer))
curVer = splitVersion(re.sub(r'[^0-9.]+', '', curVer))
if not minVer or not curVer or not maxVer:
return False
if len(minVer) < 3:
minVer += ["0"]
if len(curVer) < 3:
curVer += ["0"]
if len(maxVer) < 3:
maxVer += ["99"]
minVer = "{:04n}{:04n}{:04n}".format(int(minVer[0]), int(minVer[1]), int(minVer[2]))
maxVer = "{:04n}{:04n}{:04n}".format(int(maxVer[0]), int(maxVer[1]), int(maxVer[2]))
curVer = "{:04n}{:04n}{:04n}".format(int(curVer[0]), int(curVer[1]), int(curVer[2]))
return (minVer <= curVer and maxVer >= curVer)
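# Illustrative worked examples (the version strings are assumptions, not real plugin
# metadata); each segment is zero-padded to four digits before the string comparison:
#
#   isCompatible('3.10.4', '3.4', '3.99')   # True:  000300040000 <= 000300100004 <= 000300990099
#   isCompatible('2.18.28', '3.0', '3.99')  # False: 000200180028 is below the minimum 000300000000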
def pyQgisVersion():
""" Return current QGIS version number as X.Y.Z for testing plugin compatibility.
        If Y = 99, bump up to (X+1).0.0, so e.g. 2.99 becomes 3.0.0
This way QGIS X.99 is only compatible with plugins for the upcoming major release.
"""
x, y, z = re.findall(r'^(\d*).(\d*).(\d*)', Qgis.QGIS_VERSION)[0]
if y == '99':
x = str(int(x) + 1)
y = z = '0'
return '{}.{}.{}'.format(x, y, z)
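# Illustrative examples of the bump described in the docstring (assuming the usual
# 'X.Y.Z-Name' form of Qgis.QGIS_VERSION; the version strings are assumptions):
#
#   '3.10.4-Name' -> '3.10.4'
#   '2.99.0-Name' -> '3.0.0'   (X.99 is treated as belonging to the next major release)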
| gpl-2.0 |
RuudBurger/CouchPotatoServer | libs/caper/parsers/anime.py | 81 | 2347 | # Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from caper.parsers.base import Parser
REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)
PATTERN_GROUPS = [
('identifier', [
r'S(?P<season>\d+)E(?P<episode>\d+)',
r'(S(?P<season>\d+))|(E(?P<episode>\d+))',
r'Ep(?P<episode>\d+)',
r'$(?P<absolute>\d+)^',
(r'Episode', r'(?P<episode>\d+)'),
]),
('video', [
(r'(?P<h264_profile>%s)', [
'Hi10P'
]),
(r'.(?P<resolution>%s)', [
'720p',
'1080p',
'960x720',
'1920x1080'
]),
(r'(?P<source>%s)', [
'BD'
]),
]),
('audio', [
(r'(?P<codec>%s)', [
'FLAC'
]),
])
]
class AnimeParser(Parser):
def __init__(self, debug=False):
super(AnimeParser, self).__init__(PATTERN_GROUPS, debug)
def capture_group(self, fragment):
match = REGEX_GROUP.match(fragment.value)
if not match:
return None
return match.group('group')
def run(self, closures):
"""
:type closures: list of CaperClosure
"""
self.setup(closures)
self.capture_closure('group', func=self.capture_group)\
.execute(once=True)
self.capture_fragment('show_name', single=False)\
.until_fragment(value__re='identifier')\
.until_fragment(value__re='video')\
.execute()
self.capture_fragment('identifier', regex='identifier') \
.capture_fragment('video', regex='video', single=False) \
.capture_fragment('audio', regex='audio', single=False) \
.execute()
self.result.build()
return self.result
| gpl-3.0 |
andrewnc/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
    # 2 latent vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
    # check that copy doesn't destroy the original arrays
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
nt/code-jam-ruby | gcj_clear_contest.py | 1 | 2210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements the main function for the contest cleaner, which
uses the ContestManager class in the lib directory to clear all the contest
information."""
import optparse
import os
import sys
from lib import constants
from lib import contest_manager
from lib import error
def main():
"""Main function for the contest cleaner script.
This script receives no positional arguments.
"""
try:
# Create an option parser and use it to parse the supplied arguments.
program_version = 'GCJ contest cleaner {0}'.format(
constants.VERSION)
parser = optparse.OptionParser(usage='%prog [options] contest_id',
version=program_version)
parser.add_option('-p', '--passwd', action='store', dest='password',
help=('Password used to login in the server, will be '
'asked if not specified'))
options, args = parser.parse_args()
# Check that the number of arguments is valid.
if len(args) != 0:
raise error.OptionError('need no positional arguments')
# Clear the contest information.
contest_manager.ClearContest()
except error.OptionError as e:
parser.print_usage()
program_basename = os.path.basename(sys.argv[0])
sys.stderr.write('{0}: error: {1}\n'.format(program_basename, e))
sys.exit(1)
except error.UserError as e:
sys.stderr.write(str(e))
sys.exit(1)
except error.CommandlineError as e:
sys.stderr.write('{0}: {1}'.format(e.__class__.__name__, e))
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 |
HelloLily/hellolily | lily/contacts/documents.py | 1 | 2522 | from lily.accounts.models import Account
from django_elasticsearch_dsl import DocType, Index, IntegerField, ObjectField
from lily.search.fields import CharField, EmailAddressField, PhoneNumberField
from lily.tags.models import Tag
from lily.utils.models.models import EmailAddress, PhoneNumber
from .models import Contact
index = Index('contact')
@index.doc_type
class ContactDoc(DocType):
accounts = ObjectField(properties={
'id': IntegerField(),
'name': CharField(),
'phone_numbers': PhoneNumberField(related_model=PhoneNumber),
}, related_model=Account)
description = CharField()
email_addresses = EmailAddressField(related_model=EmailAddress)
full_name = CharField()
phone_numbers = PhoneNumberField(related_model=PhoneNumber)
tags = CharField(related_model=Tag)
tenant_id = IntegerField()
def get_queryset(self):
return Contact.objects.prefetch_related('email_addresses', 'phone_numbers', 'tags')
def prepare_accounts(self, obj):
functions = obj.functions.filter(
account__is_deleted=False
).select_related('account').prefetch_related('account__phone_numbers')
return [self._convert_function_to_account(func) for func in functions]
def _convert_function_to_account(self, func):
return {
'id': func.account_id,
'name': func.account.name if func.account.name else '',
'phone_numbers': [phone_number.number for phone_number in func.account.phone_numbers.all()],
}
def prepare_email_addresses(self, obj):
return [email.email_address for email in obj.email_addresses.all()]
def prepare_phone_numbers(self, obj):
return [phone_number.number for phone_number in obj.phone_numbers.all()]
def prepare_tags(self, obj):
return [tag.name for tag in obj.tags.all()]
def get_instances_from_accounts(self, account):
return account.contacts.all()
def get_instances_from_accounts_phone_numbers(self, phone_number):
return Contact.objects.filter(accounts__phone_numbers=phone_number)
def get_instances_from_email_addresses(self, email_address):
return email_address.contact_set.all()
def get_instances_from_phone_numbers(self, phone_number):
return phone_number.contact_set.all()
def get_instances_from_tags(self, tag):
if tag.content_type.model == 'contact':
return Contact.objects.get(pk=tag.object_id)
class Meta:
model = Contact
| agpl-3.0 |
xutian/avocado-vt | selftests/unit/test_virsh.py | 8 | 12854 | #!/usr/bin/python
import unittest
import logging
import os
import sys
from avocado.utils import process
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
def process_is_alive(name_pattern):
"""
'pgrep name' misses all python processes and also long process names.
'pgrep -f name' gets all shell commands with name in args.
    So look only for commands whose initial pathname ends with name.
Name itself is an egrep pattern, so it can use | etc for variations.
"""
return process.system("pgrep -f '^([^ /]*/)*(%s)([ ]|$)'" % name_pattern,
ignore_status=True, shell=True) == 0
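# Illustrative sketch of the resulting pgrep invocation (the process name is an
# assumption chosen for the example):
#
#   process_is_alive("libvirtd")
#   # runs: pgrep -f '^([^ /]*/)*(libvirtd)([ ]|$)'
#   # which matches "/usr/sbin/libvirtd --listen" but not "grep libvirtd"-style command lines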
class bogusVirshFailureException(unittest.TestCase.failureException):
def __init__(self, *args, **dargs):
self.virsh_args = args
self.virsh_dargs = dargs
def __str__(self):
msg = ("Codepath under unittest attempted call to un-mocked virsh"
" method, with args: '%s' and dargs: '%s'"
% (self.virsh_args, self.virsh_dargs))
return msg
def FakeVirshFactory(preserve=None):
"""
    Return a Virsh() instance whose methods raise bogusVirshFailureException.
    Users of this factory should override the methods under test on the instance.
:param preserve: List of symbol names NOT to modify, None for all
"""
from virttest import virsh
def raise_bogusVirshFailureException(*args, **dargs):
raise bogusVirshFailureException()
if preserve is None:
preserve = []
fake_virsh = virsh.Virsh(virsh_exec='/bin/false',
uri='qemu:///system', debug=True,
ignore_status=True)
    # Make all virsh commands raise an exception when called
for symbol in dir(virsh):
# Get names of just closure functions by Virsh class
if symbol in virsh.NOCLOSE + preserve:
continue
if isinstance(getattr(fake_virsh, symbol), virsh.VirshClosure):
# fake_virsh is a propcan, can't use setattr.
fake_virsh.__super_set__(symbol, raise_bogusVirshFailureException)
return fake_virsh
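# Illustrative sketch of the intended use (the virsh command names are assumptions
# for the example; any un-mocked call raises bogusVirshFailureException):
#
#   fake_virsh = FakeVirshFactory()
#   # fake_virsh is a propcan, so overriding a method also goes through __super_set__
#   fake_virsh.__super_set__('dom_list', lambda *args, **dargs: 'fake output')
#   fake_virsh.dom_list()    # -> 'fake output'
#   fake_virsh.start('vm1')  # raises bogusVirshFailureException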
class ModuleLoad(unittest.TestCase):
from virttest import virsh
class ConstantsTest(ModuleLoad):
def test_ModuleLoad(self):
self.assertTrue(hasattr(self.virsh, 'NOCLOSE'))
self.assertTrue(hasattr(self.virsh, 'SCREENSHOT_ERROR_COUNT'))
self.assertTrue(hasattr(self.virsh, 'VIRSH_COMMAND_CACHE'))
self.assertTrue(hasattr(self.virsh, 'VIRSH_EXEC'))
class TestVirshClosure(ModuleLoad):
@staticmethod
def somefunc(*args, **dargs):
return (args, dargs)
class SomeClass(dict):
def somemethod(self):
return "foobar"
def test_init(self):
# save some typing
VC = self.virsh.VirshClosure
# self is guaranteed to be not dict-like
self.assertRaises(ValueError, VC, self.somefunc, self)
self.assertRaises(ValueError, VC, lambda: None, self)
def test_args(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass()
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 0)
def test_fake_virsh(self):
fake_virsh = FakeVirshFactory()
for symb in dir(self.virsh):
if symb in self.virsh.NOCLOSE:
continue
value = fake_virsh.__super_get__(symb)
self.assertRaises(unittest.TestCase.failureException, value)
def test_dargs(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst()
self.assertEqual(len(args), 0)
self.assertEqual(len(dargs), 1)
self.assertEqual(list(dargs.keys()), ['foo'])
self.assertEqual(list(dargs.values()), ['bar'])
def test_args_and_dargs(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(list(dargs.keys()), ['foo'])
self.assertEqual(list(dargs.values()), ['bar'])
def test_args_dargs_subclass(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(list(dargs.keys()), ['foo'])
self.assertEqual(list(dargs.values()), ['bar'])
def test_update_args_dargs_subclass(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(list(dargs.keys()), ['foo'])
self.assertEqual(list(dargs.values()), ['bar'])
# Update dictionary
tcinst['sna'] = 'fu'
# Is everything really the same?
args, dargs = vcinst('foo', 'baz')
self.assertEqual(len(args), 2)
self.assertEqual(args[0], 'foo')
self.assertEqual(args[1], 'baz')
self.assertEqual(len(dargs), 2)
self.assertEqual(dargs['foo'], 'bar')
self.assertEqual(dargs['sna'], 'fu')
def test_multi_inst(self):
# save some typing
VC1 = self.virsh.VirshClosure
VC2 = self.virsh.VirshClosure
tcinst1 = self.SomeClass(darg1=1)
tcinst2 = self.SomeClass(darg1=2)
vcinst1 = VC1(self.somefunc, tcinst1)
vcinst2 = VC2(self.somefunc, tcinst2)
args1, dargs1 = vcinst1(1)
args2, dargs2 = vcinst2(2)
self.assertEqual(len(args1), 1)
self.assertEqual(len(args2), 1)
self.assertEqual(args1[0], 1)
self.assertEqual(args2[0], 2)
self.assertEqual(len(dargs1), 1)
self.assertEqual(len(dargs2), 1)
self.assertEqual(dargs1['darg1'], 1)
self.assertEqual(dargs2['darg1'], 2)
class ConstructorsTest(ModuleLoad):
def test_VirshBase(self):
vb = self.virsh.VirshBase()
del vb # keep pylint happy
def test_Virsh(self):
v = self.virsh.Virsh()
del v # keep pylint happy
def test_VirshPersistent(self):
test_virsh = self.virsh.Virsh()
if test_virsh['virsh_exec'] == '/bin/true':
return
else:
logging.disable(logging.INFO)
vp = self.virsh.VirshPersistent()
vp.close_session() # Make sure session gets cleaned up
def TestVirshClosure(self):
class MyDict(dict):
pass
vc = self.virsh.VirshClosure(None, MyDict())
del vc # keep pylint happy
# Ensure the following tests ONLY run if a valid virsh command exists #####
class ModuleLoadCheckVirsh(unittest.TestCase):
from virttest import virsh
def run(self, *args, **dargs):
test_virsh = self.virsh.Virsh()
if test_virsh['virsh_exec'] == '/bin/true':
return # Don't run any tests, no virsh executable was found
else:
super(ModuleLoadCheckVirsh, self).run(*args, **dargs)
class SessionManagerTest(ModuleLoadCheckVirsh):
def test_del_VirshPersistent(self):
"""
Unittest for __del__ of VirshPersistent.
This test makes sure the __del__ method of VirshPersistent works
well in `del vp_instance`.
"""
vp = self.virsh.VirshPersistent()
virsh_exec = vp.virsh_exec
self.assertTrue(process_is_alive(virsh_exec))
del vp
self.assertFalse(process_is_alive(virsh_exec))
def test_VirshSession(self):
"""
Unittest for VirshSession.
        This test uses VirshSession over VirshPersistent with auto_close=True.
"""
virsh_exec = self.virsh.Virsh()['virsh_exec']
# Build a VirshSession object.
session_1 = self.virsh.VirshSession(virsh_exec, auto_close=True)
self.assertTrue(process_is_alive(virsh_exec))
del session_1
self.assertFalse(process_is_alive(virsh_exec))
def test_VirshPersistent(self):
"""
Unittest for session manager of VirshPersistent.
"""
virsh_exec = self.virsh.Virsh()['virsh_exec']
vp_1 = self.virsh.VirshPersistent()
self.assertTrue(process_is_alive(virsh_exec))
# Init the vp_2 with same params of vp_1.
vp_2 = self.virsh.VirshPersistent(**vp_1)
# Make sure vp_1 and vp_2 are refer to the same session.
self.assertEqual(vp_1.session_id, vp_2.session_id)
del vp_1
# Make sure the session is not closed when vp_2 still refer to it.
self.assertTrue(process_is_alive(virsh_exec))
del vp_2
# Session was closed since no other VirshPersistent refer to it.
self.assertFalse(process_is_alive(virsh_exec))
class VirshHasHelpCommandTest(ModuleLoadCheckVirsh):
def setUp(self):
# subclasses override self.virsh
self.VIRSH_COMMAND_CACHE = self.virsh.VIRSH_COMMAND_CACHE
def test_false_command(self):
self.assertFalse(self.virsh.has_help_command('print'))
self.assertFalse(self.virsh.has_help_command('Commands:'))
self.assertFalse(self.virsh.has_help_command('dom'))
self.assertFalse(self.virsh.has_help_command('pool'))
def test_true_command(self):
self.assertTrue(self.virsh.has_help_command('uri'))
self.assertTrue(self.virsh.has_help_command('help'))
self.assertTrue(self.virsh.has_help_command('list'))
def test_no_cache(self):
self.VIRSH_COMMAND_CACHE = None
self.assertTrue(self.virsh.has_help_command('uri'))
self.VIRSH_COMMAND_CACHE = []
self.assertTrue(self.virsh.has_help_command('uri'))
def test_subcommand_help(self):
regex = r'--command'
self.assertTrue(self.virsh.has_command_help_match('help', regex))
self.assertFalse(self.virsh.has_command_help_match('uri', regex))
def test_groups_in_commands(self):
# groups will be empty in older libvirt, but test will still work
groups = self.virsh.help_command_group(cache=True)
groups_set = set(groups)
commands = self.virsh.help_command_only(cache=True)
commands_set = set(commands)
grp_cmd = self.virsh.help_command(cache=True)
grp_cmd_set = set(grp_cmd)
# No duplicates check
self.assertEqual(len(commands_set), len(commands))
self.assertEqual(len(groups_set), len(groups))
self.assertEqual(len(grp_cmd_set), len(grp_cmd))
# No groups in commands or commands in groups
self.assertEqual(len(groups_set & commands_set), 0)
# Groups and Commands in help_command
self.assertTrue(len(grp_cmd_set), len(commands_set) + len(groups_set))
class VirshHelpCommandTest(ModuleLoadCheckVirsh):
def test_cache_command(self):
l1 = self.virsh.help_command(cache=True)
l2 = self.virsh.help_command()
l3 = self.virsh.help_command()
self.assertEqual(l1, l2)
self.assertEqual(l2, l3)
self.assertEqual(l3, l1)
class VirshClassHasHelpCommandTest(VirshHasHelpCommandTest):
def setUp(self):
logging.disable(logging.INFO)
super(VirshClassHasHelpCommandTest, self).setUp()
self.virsh = self.virsh.Virsh(debug=False)
class VirshPersistentClassHasHelpCommandTest(VirshHasHelpCommandTest):
def setUp(self):
logging.disable(logging.INFO)
super(VirshPersistentClassHasHelpCommandTest, self).setUp()
self.VirshPersistent = self.virsh.VirshPersistent
self.virsh = self.VirshPersistent(debug=False)
self.assertTrue(process_is_alive(self.virsh.virsh_exec))
def test_recycle_session(self):
# virsh can be used as a dict of it's properties
another = self.VirshPersistent(**self.virsh)
self.assertEqual(self.virsh.session_id, another.session_id)
def tearDown(self):
self.assertTrue(process_is_alive(self.virsh.virsh_exec))
self.virsh.close_session()
self.assertFalse(process_is_alive(self.virsh.virsh_exec))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
matijapretnar/projekt-tomo | web/users/migrations/0005_auto_20190328_1457.py | 2 | 1223 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2019-03-28 14:57
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20161004_0927'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=70, verbose_name='first name'),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=70, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=70, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| agpl-3.0 |
chrxr/wagtail | wagtail/wagtailadmin/tests/test_rich_text.py | 3 | 7407 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.tests.testapp.rich_text import CustomRichTextArea
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailadmin.rich_text import HalloRichTextArea, get_rich_text_editor_widget
from wagtail.wagtailcore.models import Page, get_page_models
class BaseRichTextEditHandlerTestCase(TestCase):
def _clear_edit_handler_cache(self):
"""
These tests generate new EditHandlers with different settings. The
cached edit handlers should be cleared before and after each test run
to ensure that no changes leak through to other tests.
"""
from wagtail.tests.testapp.models import DefaultRichBlockFieldPage
block_page_edit_handler = DefaultRichBlockFieldPage.get_edit_handler()
if block_page_edit_handler._form_class:
rich_text_block = block_page_edit_handler._form_class.base_fields['body'].block.child_blocks['rich_text']
if hasattr(rich_text_block, 'field'):
del rich_text_block.field
for page_class in get_page_models():
page_class.get_edit_handler.cache_clear()
def setUp(self):
super(BaseRichTextEditHandlerTestCase, self).setUp()
self._clear_edit_handler_cache()
def tearDown(self):
self._clear_edit_handler_cache()
super(BaseRichTextEditHandlerTestCase, self).tearDown()
class TestGetRichTextEditorWidget(TestCase):
@override_settings()
def test_default(self):
# Simulate the absence of a setting
if hasattr(settings, 'WAGTAILADMIN_RICH_TEXT_EDITORS'):
del settings.WAGTAILADMIN_RICH_TEXT_EDITORS
self.assertIsInstance(get_rich_text_editor_widget(), HalloRichTextArea)
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
'default': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
})
def test_overridden_default_editor(self):
self.assertIsInstance(get_rich_text_editor_widget(), CustomRichTextArea)
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
'custom': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
})
def test_custom_editor_without_default(self):
self.assertIsInstance(get_rich_text_editor_widget('custom'), CustomRichTextArea)
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
'default': {
'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
},
'custom': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
})
def test_custom_editor_with_default(self):
self.assertIsInstance(get_rich_text_editor_widget(), HalloRichTextArea)
self.assertIsInstance(get_rich_text_editor_widget('custom'), CustomRichTextArea)
@override_settings()
class TestDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
def setUp(self):
super(TestDefaultRichText, self).setUp()
# Find root page
self.root_page = Page.objects.get(id=2)
self.login()
# Simulate the absence of a setting
if hasattr(settings, 'WAGTAILADMIN_RICH_TEXT_EDITORS'):
del settings.WAGTAILADMIN_RICH_TEXT_EDITORS
def test_default_editor_in_rich_text_field(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'defaultrichtextfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
        # Check that hallo (the default editor for now) is used
self.assertContains(response, 'makeHalloRichTextEditable("id_body");')
def test_default_editor_in_rich_text_block(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'defaultrichblockfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
        # Check that hallo (the default editor for now) is used
self.assertContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
'default': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
})
class TestOverriddenDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
def setUp(self):
super(TestOverriddenDefaultRichText, self).setUp()
# Find root page
self.root_page = Page.objects.get(id=2)
self.login()
def test_overridden_default_editor_in_rich_text_field(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'defaultrichtextfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
# Check that hallo (default editor by now) was replaced with fake editor
self.assertNotContains(response, 'makeHalloRichTextEditable("id_body");')
self.assertContains(response, 'customEditorInitScript("id_body");')
def test_overridden_default_editor_in_rich_text_block(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'defaultrichblockfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
# Check that hallo (default editor by now) was replaced with fake editor
self.assertNotContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
self.assertContains(response, 'customEditorInitScript("__PREFIX__-value");')
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
'default': {
'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
},
'custom': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
})
class TestCustomDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
def setUp(self):
super(TestCustomDefaultRichText, self).setUp()
# Find root page
self.root_page = Page.objects.get(id=2)
self.login()
def test_custom_editor_in_rich_text_field(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'customrichtextfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
# Check that hallo (default editor by now) was replaced with fake editor
self.assertNotContains(response, 'makeHalloRichTextEditable("id_body");')
self.assertContains(response, 'customEditorInitScript("id_body");')
def test_custom_editor_in_rich_text_block(self):
response = self.client.get(reverse(
'wagtailadmin_pages:add', args=('tests', 'customrichblockfieldpage', self.root_page.id)
))
# Check status code
self.assertEqual(response.status_code, 200)
# Check that hallo (default editor by now) was replaced with fake editor
self.assertNotContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
self.assertContains(response, 'customEditorInitScript("__PREFIX__-value");')
| bsd-3-clause |
Maistho/CouchPotatoServer | couchpotato/core/notifications/androidpn.py | 17 | 2293 | import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'AndroidPN'
class AndroidPN(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
data = {
'action': "send",
'uri': "",
'title': self.default_title,
'message': toUnicode(message),
'broadcast': self.conf('broadcast'),
'username': self.conf('username'),
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
try:
self.urlopen(self.conf('url'), headers = headers, data = data, show_error = False)
return True
except:
log.error('AndroidPN failed: %s', traceback.format_exc())
return False
config = [{
'name': 'androidpn',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'androidpn',
'description': 'Self hosted Android push notification server',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'broadcast',
'label': 'Broadcast',
'default': 1,
'type': 'bool',
'description': 'Send notification to all users',
},
{
'name': 'username',
'label': 'Username',
'description': 'Required if broadcast not selected',
},
{
'name': 'url',
'label': 'Url',
'description': 'URL of server',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 |
alexmojaki/funcfinder | funcfinder/__init__.py | 1 | 9712 | from bdb import Bdb
import inspect
from itertools import imap, dropwhile, permutations
import pydoc
import re
import sys
import traceback
import timeit
import wrapt
import funcfinder.answers
import funcfinder.questions
from utils import TryImportError
def search_questions(terms):
if isinstance(terms, basestring):
terms = terms.split()
terms = map(str.lower, terms)
print
found = False
for question in funcfinder.questions.functions.itervalues():
search_string = (question.__name__ + question.__doc__).lower()
if all(imap(search_string.__contains__, terms)):
found = True
print question.__name__ + ":\n"
print pydoc.getdoc(question)
print "\n-----------------------\n"
if not found:
print "No questions found"
def _show_dependencies(dependencies, existing_sources):
found_dependency = False
if dependencies:
for dependency in dependencies:
dependency_source = _get_source(dependency)
if dependency_source in existing_sources:
continue
spaceless_dependency_source = re.sub("\s+", "", dependency_source)
found_matching_source = False
for existing_source in existing_sources:
spaceless_existing_source = re.sub("\s+", "", existing_source)
if spaceless_dependency_source in spaceless_existing_source:
found_matching_source = True
break
if found_matching_source:
continue
if not found_dependency:
print "Dependencies:"
print
found_dependency = True
_show_source(dependency, dependency_source)
def show_question(question, time_answers=True):
print
if isinstance(question, basestring):
try:
question = funcfinder.questions.functions[question]
except KeyError:
print "No question with name %s found" % question
return
sources = set()
dependencies = set()
_show_source_and_add_to_set(question, sources)
if hasattr(question, "answers"):
print "Answers:"
print
correct_answers = []
for answer in question.answers:
dependencies.update(_CodeDetector.detect(question, answer, include_questions=True))
_show_source_and_add_to_set(answer, sources)
try:
question(answer)
print "Passed tests successfully."
print "--------------------------"
print
correct_answers.append(answer)
except Exception as e:
print "Failed tests with exception:"
if not isinstance(e, TryImportError):
tb_list = traceback.extract_tb(sys.exc_info()[2])
tb_list = list(dropwhile(lambda entry: entry[2] != question.__name__, tb_list))
print "".join(traceback.format_list(tb_list)).rstrip()
print "".join(traceback.format_exception_only(*sys.exc_info()[:2]))
if time_answers:
_time_answers(question, correct_answers)
_show_dependencies(dependencies, sources)
else:
print "No answers have been marked as solving this question, which is a problem."
print "The question will now be asked manually. If any solutions are found, please contribute by adding:"
print
print "@solves(q.%s)" % question.__name__
print
print "to each solution."
print
ask(question, time_answers=time_answers)
def _get_source(func, index_permutation=None):
try:
name = func.__name__
except AttributeError:
name = func.co_name
pattern = r"(def\s+%s\(.+)" % name
regex = re.compile(pattern, re.DOTALL)
source = inspect.getsource(func).strip()
match = regex.search(source)
if match is not None:
source = match.group(1)
if index_permutation and len(index_permutation) > 1 and list(index_permutation) != sorted(index_permutation):
pattern = r"(def\s+%s)\((.+?)\)(.+)" % name
regex = re.compile(pattern, re.DOTALL)
match = regex.search(source)
if match is None:
raise Exception("Failed to extract arguments from function definition:\n" + source)
args = re.split(r"\s*,\s*", match.group(2), flags=re.DOTALL)
args = _permute(args, index_permutation)
args = ", ".join(args)
source = "{start}({args}){rest}".format(start=match.group(1), args=args, rest=match.group(3))
return source
def _show_source(func, source):
print inspect.getsourcefile(func), ":", inspect.getsourcelines(func)[1]
print source
print
def _show_source_and_add_to_set(func, sources_set, index_permutation=None):
source = _get_source(func, index_permutation)
sources_set.add(source)
_show_source(func, source)
def _time(question, answer, number=1):
def stmt():
question(answer)
if number == 1:
time_taken = 0
while time_taken < 1:
number *= 2
time_taken = timeit.timeit(stmt, number=number)
return min(timeit.repeat(stmt, number=number, repeat=5)), number
def _time_answers(question, correct_answers):
if len(correct_answers) > 1:
print "Best times per answer:"
number = 1
for answer in correct_answers:
time_taken, number = _time(question, answer, number)
print "%s: %.3f s" % (answer.__name__, time_taken)
print "(among 5 sets of %i repetitions)" % number
print
def ask(question, time_answers=True):
num_args_holder = []
def count_expected_args(*args):
num_args_holder.append(len(args))
exc_info = None
try:
question(count_expected_args)
except Exception:
exc_info = sys.exc_info()
pass
if not num_args_holder:
if exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
else:
raise AssertionError("Failed to find the number of arguments the answer must have. "
"Did you call the given function?")
del exc_info
num_args = num_args_holder[0]
index_permutations = list(permutations(range(num_args)))
correct_answers = []
dependencies = set()
sources = set()
for answer in funcfinder.answers.functions.itervalues():
if not getattr(answer, "ask_ignore", False) and answer.func_code.co_argcount == num_args:
for index_permutation in index_permutations:
try:
permuted_answer = _permute_args(index_permutation)(answer)
question(permuted_answer)
except (_ForbiddenKwargs, _WrongNumberOfArgs) as e:
print e.message
return
except Exception:
pass
else:
_show_source_and_add_to_set(answer, sources, index_permutation)
solved_questions = getattr(answer, "solved_questions")
if solved_questions:
print "Solves the question%s %s" % (
"s" * (len(solved_questions) > 1),
", ".join(q.__name__ for q in solved_questions))
print
print "-------------------------"
print
correct_answers.append(permuted_answer)
dependencies.update(_CodeDetector.detect(question, permuted_answer, include_questions=False))
dependencies.discard(answer.func_code)
break
if not correct_answers:
print "Sorry, no correct answers found. If you find one, please consider contributing it!"
return
if time_answers:
_time_answers(question, correct_answers)
_show_dependencies(dependencies, sources)
def _permute_args(index_permutation):
@wrapt.decorator
def wrapper(wrapped, _, args, kwargs):
if kwargs:
raise _ForbiddenKwargs
if len(index_permutation) != len(args):
raise _WrongNumberOfArgs
return wrapped(*_permute(args, index_permutation))
return wrapper
def _permute(it, index_permutation):
return (it[i] for i in index_permutation)
class _ForbiddenKwargs(Exception):
message = "You cannot ask for a function with keyword arguments."
class _WrongNumberOfArgs(Exception):
message = "The function you ask for must always have the same number of arguments."
class _CodeDetector(Bdb):
def __init__(self, *args):
Bdb.__init__(self, *args)
self.codes = set()
def do_clear(self, arg):
pass
def user_call(self, frame, argument_list):
self.codes.add(frame.f_code)
@classmethod
def detect(cls, question, answer, include_questions):
detector = cls()
detector.set_trace()
try:
question(answer)
except Exception:
pass
detector.set_quit()
for func in (question, answer):
detector.codes.discard(func.func_code)
filtered_codes = set()
for code in detector.codes:
filename = code.co_filename
def from_package(package):
return ("funcfinder/%s/" % package) in filename and not filename.endswith("__init__.py")
# noinspection PyTypeChecker
if from_package("answers") or include_questions and from_package("questions"):
filtered_codes.add(code)
return filtered_codes
| mit |
unioslo/cerebrum | Cerebrum/utils/pidcontext.py | 1 | 1737 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright 2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This module contains a simple PID file locking tool.
>>> from pidcontext import Pid
... with Pid():
... do_something()
Runs do_something() only if a lockfile for the program is acquirable. A warning
stating '<filename> is locked' is logged and SystemExit is raised if the
lockfile is not acquirable.
"""
import logging
from cereconf import LOCKFILE_DIR
from pid import PidFile
from pid import PidFileAlreadyLockedError
class Pid(object):
def __init__(self):
self.pid = PidFile(piddir=LOCKFILE_DIR)
def __enter__(self):
try:
self.pid.__enter__()
except PidFileAlreadyLockedError:
logging.getLogger(__name__).warning('%s is locked',
self.pid.filename)
raise SystemExit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
self.pid.__exit__()
| gpl-2.0 |
popazerty/SDG-e2 | lib/python/Components/FileList.py | 19 | 14902 | import os
import re
from MenuList import MenuList
from Components.Harddisk import harddiskmanager
from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename, fileExists
from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, \
eServiceReference, eServiceCenter, gFont
from Tools.LoadPixmap import LoadPixmap
import skin
EXTENSIONS = {
"m4a": "music",
"mp2": "music",
"mp3": "music",
"wav": "music",
"ogg": "music",
"wma": "music",
"flac": "music",
"jpg": "picture",
"jpeg": "picture",
"png": "picture",
"bmp": "picture",
"ts": "movie",
"avi": "movie",
"divx": "movie",
"m4v": "movie",
"mpg": "movie",
"mpeg": "movie",
"mkv": "movie",
"mp4": "movie",
"mov": "movie",
"m2ts": "movie",
"3gp": "movie",
"3g2": "movie",
"asf": "movie",
"wmv": "movie",
}
def FileEntryComponent(name, absolute = None, isDir = False):
res = [ (absolute, isDir) ]
x, y, w, h = skin.parameters.get("FileListName",(35, 1, 470, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, name))
if isDir:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "extensions/directory.png"))
else:
extension = name.split('.')
extension = extension[-1].lower()
if EXTENSIONS.has_key(extension):
png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "extensions/" + EXTENSIONS[extension] + ".png"))
else:
png = None
if png is not None:
x, y, w, h = skin.parameters.get("FileListIcon",(10, 2, 20, 20))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
return res
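# Illustrative sketch of the entry layout this helper builds (hypothetical
# path; offsets are the defaults used when the skin does not override the
# "FileListName" / "FileListIcon" parameters):
#
#     FileEntryComponent("clip.mkv", absolute="/media/hdd/clip.mkv")
#     # -> [("/media/hdd/clip.mkv", False),
#     #     (TYPE_TEXT, 35, 1, 470, 20, 0, RT_HALIGN_LEFT, "clip.mkv"),
#     #     (TYPE_PIXMAP_ALPHATEST, 10, 2, 20, 20, <movie icon pixmap>)]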
class FileList(MenuList):
def __init__(self, directory, showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = None, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.additional_extensions = additionalExtensions
self.mountpoints = []
self.current_directory = None
self.current_mountpoint = None
self.useServiceRef = useServiceRef
self.showDirectories = showDirectories
self.showMountpoints = showMountpoints
self.showFiles = showFiles
self.isTop = isTop
# example: matching .nfi and .ts files: "^.*\.(nfi|ts)"
if matchingPattern:
self.matchingPattern = re.compile(matchingPattern)
else:
self.matchingPattern = None
self.inhibitDirs = inhibitDirs or []
self.inhibitMounts = inhibitMounts or []
self.refreshMountpoints()
self.changeDir(directory)
font = skin.fonts.get("FileList", ("Regular", 18, 23))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.serviceHandler = eServiceCenter.getInstance()
def refreshMountpoints(self):
self.mountpoints = [os.path.join(p.mountpoint, "") for p in harddiskmanager.getMountedPartitions()]
self.mountpoints.sort(reverse = True)
def getMountpoint(self, file):
file = os.path.join(os.path.realpath(file), "")
for m in self.mountpoints:
if file.startswith(m):
return m
return False
def getMountpointLink(self, file):
if os.path.realpath(file) == file:
return self.getMountpoint(file)
else:
if file[-1] == "/":
file = file[:-1]
mp = self.getMountpoint(file)
last = file
file = os.path.dirname(file)
while last != "/" and mp == self.getMountpoint(file):
last = file
file = os.path.dirname(file)
return os.path.join(last, "")
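# Illustrative sketch (hypothetical paths): if /media/hdd is a symlink into a
# mounted partition, getMountpoint() works on the resolved path while
# getMountpointLink() walks back up and returns the outermost symlinked
# directory that still resolves to the same mountpoint:
#
#     self.getMountpoint("/media/hdd/movie/")      # -> "/autofs/sdb1/"
#     self.getMountpointLink("/media/hdd/movie/")  # -> "/media/hdd/"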
def getSelection(self):
if self.l.getCurrentSelection() is None:
return None
return self.l.getCurrentSelection()[0]
def getCurrentEvent(self):
l = self.l.getCurrentSelection()
if not l or l[0][1] == True:
return None
else:
return self.serviceHandler.info(l[0][0]).getEvent(l[0][0])
def getFileList(self):
return self.list
def inParentDirs(self, dir, parents):
dir = os.path.realpath(dir)
for p in parents:
if dir.startswith(p):
return True
return False
def changeDir(self, directory, select = None):
self.list = []
# if we are just entering from the list of mount points:
if self.current_directory is None:
if directory and self.showMountpoints:
self.current_mountpoint = self.getMountpointLink(directory)
else:
self.current_mountpoint = None
self.current_directory = directory
directories = []
files = []
if directory is None and self.showMountpoints: # present available mountpoints
for p in harddiskmanager.getMountedPartitions():
path = os.path.join(p.mountpoint, "")
if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs):
self.list.append(FileEntryComponent(name = p.description, absolute = path, isDir = True))
files = [ ]
directories = [ ]
elif directory is None:
files = [ ]
directories = [ ]
elif self.useServiceRef:
# we should not use the 'eServiceReference(string)' constructor, because it doesn't allow ':' in the directoryname
root = eServiceReference(2, 0, directory)
if self.additional_extensions:
root.setName(self.additional_extensions)
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(root)
while 1:
s = list.getNext()
if not s.valid():
del list
break
if s.flags & s.mustDescent:
directories.append(s.getPath())
else:
files.append(s)
directories.sort()
files.sort()
else:
if fileExists(directory):
try:
files = os.listdir(directory)
except:
files = []
files.sort()
tmpfiles = files[:]
for x in tmpfiles:
if os.path.isdir(directory + x):
directories.append(directory + x + "/")
files.remove(x)
if directory is not None and self.showDirectories and not self.isTop:
if directory == self.current_mountpoint and self.showMountpoints:
self.list.append(FileEntryComponent(name = "<" +_("List of storage devices") + ">", absolute = None, isDir = True))
elif (directory != "/") and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts):
self.list.append(FileEntryComponent(name = "<" +_("Parent directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True))
if self.showDirectories:
for x in directories:
if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs):
name = x.split('/')[-2]
self.list.append(FileEntryComponent(name = name, absolute = x, isDir = True))
if self.showFiles:
for x in files:
if self.useServiceRef:
path = x.getPath()
name = path.split('/')[-1]
else:
path = directory + x
name = x
if (self.matchingPattern is None) or self.matchingPattern.search(path):
self.list.append(FileEntryComponent(name = name, absolute = x , isDir = False))
if self.showMountpoints and len(self.list) == 0:
self.list.append(FileEntryComponent(name = _("nothing connected"), absolute = None, isDir = False))
self.l.setList(self.list)
if select is not None:
i = 0
self.moveToIndex(0)
for x in self.list:
p = x[0][0]
if isinstance(p, eServiceReference):
p = p.getPath()
if p == select:
self.moveToIndex(i)
i += 1
def getCurrentDirectory(self):
return self.current_directory
def canDescent(self):
if self.getSelection() is None:
return False
return self.getSelection()[1]
def descent(self):
if self.getSelection() is None:
return
self.changeDir(self.getSelection()[0], select = self.current_directory)
def getFilename(self):
if self.getSelection() is None:
return None
x = self.getSelection()[0]
if isinstance(x, eServiceReference):
x = x.getPath()
return x
def getServiceRef(self):
if self.getSelection() is None:
return None
x = self.getSelection()[0]
if isinstance(x, eServiceReference):
return x
return None
def execBegin(self):
harddiskmanager.on_partition_list_change.append(self.partitionListChanged)
def execEnd(self):
harddiskmanager.on_partition_list_change.remove(self.partitionListChanged)
def refresh(self):
self.changeDir(self.current_directory, self.getFilename())
def partitionListChanged(self, action, device):
self.refreshMountpoints()
if self.current_directory is None:
self.refresh()
def MultiFileSelectEntryComponent(name, absolute = None, isDir = False, selected = False):
res = [ (absolute, isDir, selected, name) ]
x, y, w, h = skin.parameters.get("FileListMultiName",(55, 0, 470, 25))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, name))
if isDir:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "extensions/directory.png"))
else:
extension = name.split('.')
extension = extension[-1].lower()
if EXTENSIONS.has_key(extension):
png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "extensions/" + EXTENSIONS[extension] + ".png"))
else:
png = None
if png is not None:
x, y, w, h = skin.parameters.get("FileListMultiIcon",(30, 2, 20, 20))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
if not name.startswith('<'):
if selected:
icon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_on.png"))
else:
icon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_off.png"))
x, y, w, h = skin.parameters.get("FileListMultiLock",(2, 0, 25, 25))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, icon))
return res
class MultiFileSelectList(FileList):
def __init__(self, preselectedFiles, directory, showMountpoints = False, matchingPattern = None, showDirectories = True, showFiles = True, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
if preselectedFiles is None:
self.selectedFiles = []
else:
self.selectedFiles = preselectedFiles
FileList.__init__(self, directory, showMountpoints = showMountpoints, matchingPattern = matchingPattern, showDirectories = showDirectories, showFiles = showFiles, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
self.changeDir(directory)
font = skin.fonts.get("FileListMulti", ("Regular", 20, 25))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.onSelectionChanged = [ ]
def selectionChanged(self):
for f in self.onSelectionChanged:
f()
def changeSelectionState(self):
idx = self.l.getCurrentSelectionIndex()
newList = self.list[:]
x = self.list[idx]
if not x[0][3].startswith('<'):
if x[0][1] is True:
realPathname = x[0][0]
else:
realPathname = self.current_directory + x[0][0]
if x[0][2] == True:
SelectState = False
try:
self.selectedFiles.remove(realPathname)
except:
try:
self.selectedFiles.remove(os.path.normpath(realPathname))
except:
print "Couldn't remove:", realPathname
else:
SelectState = True
if (realPathname not in self.selectedFiles) and (os.path.normpath(realPathname) not in self.selectedFiles):
self.selectedFiles.append(realPathname)
newList[idx] = MultiFileSelectEntryComponent(name = x[0][3], absolute = x[0][0], isDir = x[0][1], selected = SelectState)
self.list = newList
self.l.setList(self.list)
def getSelectedList(self):
return self.selectedFiles
def changeDir(self, directory, select = None):
self.list = []
# if we are just entering from the list of mount points:
if self.current_directory is None:
if directory and self.showMountpoints:
self.current_mountpoint = self.getMountpointLink(directory)
else:
self.current_mountpoint = None
self.current_directory = directory
directories = []
files = []
if directory is None and self.showMountpoints: # present available mountpoints
for p in harddiskmanager.getMountedPartitions():
path = os.path.join(p.mountpoint, "")
if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs):
self.list.append(MultiFileSelectEntryComponent(name = p.description, absolute = path, isDir = True))
files = [ ]
directories = [ ]
elif directory is None:
files = [ ]
directories = [ ]
elif self.useServiceRef:
root = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + directory)
if self.additional_extensions:
root.setName(self.additional_extensions)
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(root)
while 1:
s = list.getNext()
if not s.valid():
del list
break
if s.flags & s.mustDescent:
directories.append(s.getPath())
else:
files.append(s)
directories.sort()
files.sort()
else:
if fileExists(directory):
try:
files = os.listdir(directory)
except:
files = []
files.sort()
tmpfiles = files[:]
for x in tmpfiles:
if os.path.isdir(directory + x):
directories.append(directory + x + "/")
files.remove(x)
if directory is not None and self.showDirectories and not self.isTop:
if directory == self.current_mountpoint and self.showMountpoints:
self.list.append(MultiFileSelectEntryComponent(name = "<" +_("List of storage devices") + ">", absolute = None, isDir = True))
elif (directory != "/") and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts):
self.list.append(MultiFileSelectEntryComponent(name = "<" +_("Parent directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True))
if self.showDirectories:
for x in directories:
if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs):
name = x.split('/')[-2]
alreadySelected = (x in self.selectedFiles) or (os.path.normpath(x) in self.selectedFiles)
self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x, isDir = True, selected = alreadySelected))
if self.showFiles:
for x in files:
if self.useServiceRef:
path = x.getPath()
name = path.split('/')[-1]
else:
path = directory + x
name = x
if (self.matchingPattern is None) or self.matchingPattern.search(path):
alreadySelected = False
for entry in self.selectedFiles:
if os.path.basename(entry) == x:
alreadySelected = True
self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x , isDir = False, selected = alreadySelected))
self.l.setList(self.list)
if select is not None:
i = 0
self.moveToIndex(0)
for x in self.list:
p = x[0][0]
if isinstance(p, eServiceReference):
p = p.getPath()
if p == select:
self.moveToIndex(i)
i += 1
| gpl-2.0 |
Triton3D/loto | 2_get_stats_from_tickets.py | 1 | 1772 | # Module for collecting statistics on how often each number appears in Russian Lotto tickets
import os
import time
import sys
dir=".\\tickets\\"
files=os.listdir(dir)
numbers_list=[i for i in range(1,91)]
number_stats={numbers_list[i]:0 for i in range(0,90) }
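# number_stats maps every possible barrel number to a running count,
# i.e. {1: 0, 2: 0, ..., 90: 0}; each ticket file read below increments
# the entries for the numbers that ticket contains.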
print("Найдено "+str(len(files))+" файлов билетов.\nНомер первого билета: " + \
files[0][:-len('.csv')]+"\nНомер последнего билета: " + files[len(files)-1][:-len('.csv')]+ "\n")
for curfile in files:
try:
file=open(dir+curfile,'r')
except FileNotFoundError:
print("Файл " + curfile+" был удален или перемещен в процессе работы программы")
continue
tickets_contents_numbers=file.readline()[:-1].split(',')
for i in range(0,len(tickets_contents_numbers)):
number_stats[int(tickets_contents_numbers[i])]+=1
file.close()
time.sleep(0.05)
sys.stderr.write("Сбор статистики: " + str(int(files.index(curfile)*100/len(files))+1)+'%\r')
print("\nГотово!")
time.sleep(1)
try:
stats_csv=open('stats.csv','w')
except PermissionError:
print("\nФайл открыт в другой программе! Закройте пожалуйста и повторите попытку.")
stats_csv.writelines("Количество билетов ," + str(len(files))+ \
"\n\nНомер бочонка,Количество\n")
for i in range(1,91):
stats_csv.writelines(str(i)+','+str(number_stats[i])+'\n')
stats_csv.close()
print("\nРезультаты статистических данных успешно записаны в файл stats.csv ") | gpl-3.0 |
apuapaquola/rf | rf/__init__.py | 1 | 7155 | #!/usr/bin/env python
""" rf - A framework for collaborative data analysis
Copyright (C) 2015 Apuã Paquola <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import os
import shutil
import subprocess
import re
from . import rflib
__author__ = 'Apuã Paquola'
def nodes(parent):
"""Finds the nodes under a directory tree.
Args:
parent: a node at the root of the tree.
Yields:
str: each node of the tree
"""
assert os.path.isdir(parent)
yield parent
for x in os.listdir(parent):
if x not in ['_h', '_m', '.git']:
child = os.path.join(parent, x)
if os.path.isdir(child):
yield from nodes(child)
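# Illustrative sketch (hypothetical tree): for an analysis tree laid out as
#
#     ./align/_h/run
#     ./align/_m/SUCCESS
#     ./align/per_sample/_h/run
#
# list(nodes('.')) yields ['.', './align', './align/per_sample']; the _h/_m
# payload directories and .git are never descended into.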
def run(args):
""" Runs driver scripts throughout the tree
:param args:
:return:
"""
rflib.run(args)
def drop(args):
"""Deletes the contents of machine dirs _m
"""
if args.recursive:
nl = list(nodes(args.node))
else:
nl = [args.node]
dirs = [os.path.join(x, '_m') for x in nl]
for x in dirs:
assert x.endswith('_m')
assert not x.endswith('_h')
if not os.path.isdir(x):
continue
command = ['git', 'rm', '-r'] + [x]
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
pass
if args.force and os.path.isdir(x):
shutil.rmtree(x)
def clone(args):
"""Clones a tree
"""
subprocess.check_call(['git', 'clone', args.repository, args.directory])
os.chdir(args.directory)
subprocess.check_call(['git', 'annex', 'init'])
subprocess.check_call(['git', 'annex', 'sync', '--no-push'])
def commit(args):
"""Commits human and machine dirs to git and git-annex
"""
if args.recursive:
nl = list(nodes(args.node))
else:
nl = [args.node]
machine_dirs = [y for y in [os.path.join(x, '_m') for x in nl] if os.path.isdir(y)]
human_dirs = [y for y in [os.path.join(x, '_h') for x in nl] if os.path.isdir(y)]
try:
subprocess.check_call(['git', 'add'] + human_dirs)
subprocess.check_call(['git', 'annex', 'add'] + machine_dirs)
subprocess.check_call(['git', 'commit', '-m', args.message] + human_dirs + machine_dirs)
except subprocess.CalledProcessError:
raise
def get(args):
"""Fetches contents of machine directories from origin repository
:param args:
:return:
"""
if args.recursive:
nl = list(nodes(args.node))
else:
nl = [args.node]
machine_dirs = [y for y in [os.path.join(x, '_m') for x in nl] if os.path.isdir(y)]
subprocess.check_call(['git', 'annex', 'sync', '--no-push'])
subprocess.check_call(['git', 'annex', 'get'] + machine_dirs)
def node_status(node):
"""Returns node status"""
if not os.path.isdir(node + '/_h'):
return 'no _h'
elif os.path.exists(node + '/_h/run') and os.path.exists(node + '/_h/yield'):
return 'run/yield'
elif os.path.exists(node + '/_h/yield'):
return 'yield'
elif not (os.path.exists(node + '/_h/run') and os.access(node + '/_h/run', os.X_OK)):
return 'no run script'
elif not os.path.isdir(node + '/_m'):
return 'ready to run'
elif os.path.exists(node + '/_m/SUCCESS'):
return 'done'
else:
return 'incomplete'
def pretty_print_status(args):
"""Prints status of all nodes in a subtree
"""
command = ['tree', '--noreport', '-d', '-I', '_h|_m'] + [args.node]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
maxlen = max((len(x.decode().rstrip()) for x in p.stdout))
p = subprocess.Popen(command, stdout=subprocess.PIPE)
path = []
for line in p.stdout:
l = line.decode().rstrip()
m = re.search('(─ )?([^─]*)$', l)
pos = m.start(2) // 4
del path[pos:]
path.append(m.group(2))
node = '/'.join(path)
s = node_status(node)
if args.parseable:
print(node, s, sep='\t')
else:
print(l, ' ' * (maxlen - len(l) + 13 - len(s)), s)
def print_tree(args):
"""Prints directory tree under a node.
"""
subprocess.check_call(['tree', '--noreport', '-d', '-I', '_h|_m'] + [args.node])
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser_run = subparsers.add_parser('run', help='run driver script')
parser_run.add_argument('-n', '--dry-run', action='store_true')
parser_run.add_argument('-v', '--verbose', action='store_true')
parser_run.add_argument('-r', '--recursive', action='store_true')
parser_run.add_argument('-d', '--docker-image')
parser_run.add_argument('node')
parser_run.set_defaults(func=run)
parser_drop = subparsers.add_parser('drop', help='drop machine directory')
parser_drop.add_argument('-r', '--recursive', action='store_true')
parser_drop.add_argument('-f', '--force', action='store_true')
parser_drop.add_argument('node')
parser_drop.set_defaults(func=drop)
parser_clone = subparsers.add_parser('clone', help='clone an analysis tree')
parser_clone.add_argument('repository')
parser_clone.add_argument('directory')
parser_clone.set_defaults(func=clone)
parser_commit = subparsers.add_parser('commit', help='commit to git and git-annex')
parser_commit.add_argument('-r', '--recursive', action='store_true')
parser_commit.add_argument('-m', '--message', required=True)
parser_commit.add_argument('node')
parser_commit.set_defaults(func=commit)
parser_get = subparsers.add_parser('get', help='get machine directory contents from origin repository')
parser_get.add_argument('-r', '--recursive', action='store_true')
parser_get.add_argument('node')
parser_get.set_defaults(func=get)
parser_get = subparsers.add_parser('status', help='print analysis tree status')
parser_get.add_argument('-p', '--parseable', action='store_true', help='prints path <tab> status for each node')
parser_get.add_argument('node', nargs='?', default='.')
parser_get.set_defaults(func=pretty_print_status)
parser_get = subparsers.add_parser('tree', help='print analysis tree')
parser_get.add_argument('node', nargs='?', default='.')
parser_get.set_defaults(func=print_tree)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
else:
parser.parse_args(['--help'])
if __name__ == '__main__':
main()
| gpl-3.0 |
varunnaganathan/django | tests/gis_tests/geoapp/test_sitemaps.py | 123 | 2631 | from __future__ import unicode_literals
import zipfile
from io import BytesIO
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from .models import City, Country
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.sites', 'django.contrib.sitemaps']})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoSitemapTest(TestCase):
def setUp(self):
super(GeoSitemapTest, self).setUp()
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def test_geositemap_kml(self):
"Tests KML/KMZ geographic sitemaps."
for kml_type in ('kml', 'kmz'):
doc = minidom.parseString(self.client.get('/sitemaps/%s.xml' % kml_type).content)
# Ensuring the right sitemaps namespace is present.
urlset = doc.firstChild
self.assertEqual(urlset.getAttribute('xmlns'), 'http://www.sitemaps.org/schemas/sitemap/0.9')
urls = urlset.getElementsByTagName('url')
self.assertEqual(2, len(urls)) # Should only be 2 sitemaps.
for url in urls:
self.assertChildNodes(url, ['loc'])
# Getting the relative URL since we don't have a real site.
kml_url = url.getElementsByTagName('loc')[0].childNodes[0].data.split('http://example.com')[1]
if kml_type == 'kml':
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == 'kmz':
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
with zipfile.ZipFile(buf) as zf:
self.assertEqual(1, len(zf.filelist))
self.assertEqual('doc.kml', zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read('doc.kml'))
# Ensuring the correct number of placemarks are in the KML doc.
if 'city' in kml_url:
model = City
elif 'country' in kml_url:
model = Country
self.assertEqual(model.objects.count(), len(kml_doc.getElementsByTagName('Placemark')))
| bsd-3-clause |
theDarkForce/plask | plask/flask/testsuite/testing.py | 561 | 7411 | # -*- coding: utf-8 -*-
"""
flask.testsuite.testing
~~~~~~~~~~~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
from flask._compat import text_type
class TestToolsTestCase(FlaskTestCase):
def test_environ_defaults_from_config(self):
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://example.com:1234/foo/')
def test_environ_defaults(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://localhost/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://localhost/')
def test_redirect_keep_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
self.assert_equal(len(sess), 0)
sess['foo'] = [42]
self.assert_equal(len(sess), 1)
rv = c.get('/')
self.assert_equal(rv.data, b'[42]')
with c.session_transaction() as sess:
self.assert_equal(len(sess), 1)
self.assert_equal(sess['foo'], [42])
def test_session_transactions_no_null_sessions(self):
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
self.assert_in('Session backend did not open a session', str(e))
else:
self.fail('Expected runtime error')
def test_session_transactions_keep_context(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
self.assert_true(req is not None)
with c.session_transaction():
self.assert_true(req is flask.request._get_current_object())
def test_session_transaction_needs_cookies(self):
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
self.assert_in('cookies', str(e))
else:
self.fail('Expected runtime error')
def test_test_client_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
self.assert_equal(flask.g.value, 42)
self.assert_equal(resp.data, b'Hello World!')
self.assert_equal(resp.status_code, 200)
resp = c.get('/other')
self.assert_false(hasattr(flask.g, 'value'))
self.assert_in(b'Internal Server Error', resp.data)
self.assert_equal(resp.status_code, 500)
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client(self):
app = flask.Flask(__name__)
c = app.test_client()
with c:
self.assert_equal(c.get('/').status_code, 404)
with c:
self.assert_equal(c.get('/').status_code, 404)
def test_test_client_calls_teardown_handlers(self):
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
self.assert_equal(called, [None])
del called[:]
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [None])
self.assert_equal(called, [None, None])
class SubdomainTestCase(FlaskTestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SERVER_NAME'] = 'example.com'
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
def tearDown(self):
if self._ctx is not None:
self._ctx.pop()
def test_subdomain(self):
@self.app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def test_nosubdomain(self):
@self.app.route('/<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestToolsTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
| lgpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/python/keras/_impl/keras/optimizers.py | 1 | 26966 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.util.tf_export import tf_export
def clip_norm(g, c, n):
"""Clip a tensor by norm.
Arguments:
g: gradient tensor to clip.
c: clipping threshold.
n: norm of gradient tensor.
Returns:
Clipped gradient tensor.
"""
if c > 0:
condition = n >= c
then_expression = lambda: math_ops.scalar_mul(c / n, g)
else_expression = lambda: g
# saving the shape to avoid converting sparse tensor to dense
if isinstance(g, ops.Tensor):
g_shape = copy.copy(g.get_shape())
elif isinstance(g, ops.IndexedSlices):
g_shape = copy.copy(g.dense_shape)
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
g = control_flow_ops.cond(condition, then_expression, else_expression)
if isinstance(g, ops.Tensor):
g.set_shape(g_shape)
elif isinstance(g, ops.IndexedSlices):
g._dense_shape = g_shape # pylint: disable=protected-access
return g
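# Worked example (hypothetical numbers): with threshold c = 1.0 and gradient
# norm n = 5.0 the condition n >= c holds, so the gradient is rescaled by
# c / n = 0.2; with n = 0.5 < c it is returned unchanged, and c <= 0
# disables clipping altogether.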
@tf_export('keras.optimizers.Optimizer')
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the optimizer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError(
'Optimizer weight shape ' + str(pv.shape) + ' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
@tf_export('keras.optimizers.SGD')
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
Arguments:
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD
in the relevant direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
# momentum
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(K.update(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov
}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
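# Illustrative usage sketch (hypothetical values; `model` stands for an
# already-built Keras model): the clipping keywords accepted by every
# optimizer are plain constructor kwargs,
#
#     opt = SGD(lr=0.01, momentum=0.9, nesterov=True, clipnorm=1.0)
#     model.compile(optimizer=opt, loss='mse')
#
# get_gradients() then applies the L2-norm clip before the momentum update.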
@tf_export('keras.optimizers.RMSprop')
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
This optimizer is usually a good choice for recurrent
neural networks.
Arguments:
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append(K.update(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Adagrad')
class Adagrad(Optimizer):
"""Adagrad optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + K.square(g) # update accumulator
self.updates.append(K.update(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Adadelta')
class Adadelta(Optimizer):
"""Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append(K.update(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
self.updates.append(K.update(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Adam')
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
Beyond".
"""
def __init__(self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
amsgrad=False,
**kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (
K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(K.update(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Adamax')
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
**kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / # pylint: disable=g-no-augmented-assignment
(1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr / (1. - K.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
# zero init of 1st moment
ms = [K.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = K.maximum(self.beta_2 * u, K.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Nadam')
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
Nadam is Adam with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
t = K.cast(self.iterations, K.floatx()) + 1
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 *
(K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
m_t_bar = (
1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay
}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer):
"""Wrapper class for native TensorFlow optimizers.
"""
def __init__(self, optimizer): # pylint: disable=super-init-not-called
self.optimizer = optimizer
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
def apply_gradients(self, grads):
self.optimizer.apply_gradients(grads)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
grads = self.optimizer.compute_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
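# Illustrative sketch (assumes a TF 1.x-style native optimizer): a tf.train
# optimizer instance can be wrapped directly, or passed to get() below,
# which wraps it automatically,
#
#     import tensorflow as tf
#     opt = TFOptimizer(tf.train.GradientDescentOptimizer(0.1))
#
# Weight access and (de)serialization are deliberately unsupported on the
# wrapper, hence the NotImplementedError stubs above.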
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@tf_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@tf_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Arguments:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping
names (strings) to custom objects
(classes and functions)
to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
all_classes = {
'sgd': SGD,
'rmsprop': RMSprop,
'adagrad': Adagrad,
'adadelta': Adadelta,
'adam': Adam,
'adamax': Adamax,
'nadam': Nadam,
'tfoptimizer': TFOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@tf_export('keras.optimizers.get')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Arguments:
identifier: Optimizer identifier, one of
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
- TensorFlow Optimizer instance
(it will be wrapped as a Keras Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
# Wrap TF optimizer instances
if isinstance(identifier, tf_optimizer_module.Optimizer):
return TFOptimizer(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
if isinstance(identifier, Optimizer):
return identifier
else:
raise ValueError('Could not interpret optimizer identifier:', identifier)
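# Illustrative sketch of the identifier forms get() accepts (values are
# hypothetical; `existing_opt` stands for an optimizer instance built
# elsewhere):
#
#     get('adam')                                     # name, case-insensitive
#     get({'class_name': 'SGD',
#          'config': {'lr': 0.01, 'momentum': 0.9}})  # config dictionary
#     get(existing_opt)                               # returned unchanged
#     get(tf.train.AdamOptimizer(0.001))              # wrapped in TFOptimizer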
| apache-2.0 |
yast/yast-python-bindings | examples/BarGraph3.py | 1 | 3392 | #!/usr/bin/env python
# encoding: utf-8
# Advanced BarGraph example:
#
# Create a dialog with a BarGraph with a number of segments
# and a "+" and a "-" button for each segment.
import copy
from yast import import_module
import_module('UI')
from yast import *
import ycpbuiltins
class BarGraph3Client:
def main(self):
# Check for availability of the BarGraph widget - this is necessary since
# this is an optional widget that not all UIs need to support.
if not UI.HasSpecialWidget("BarGraph"):
# Pop up error message if the BarGraph widget is not available
UI.OpenDialog(
VBox(
Label("Error: This UI doesn't support the BarGraph widget!"),
PushButton(Opt("default"), "&OK")
)
)
UI.UserInput()
UI.CloseDialog()
return
# list values = [ 100, 200, 300, 150, 250, 120, 200, 120 ];
values = [100, 100, 100, 100, 100, 100, 100, 100]
inc = 10 # increment / decrement for each button press
# Create the main dialog:
#
# One BarGraph at the top, below that two rows of equal sized (thus the
# weights) buttons, below that a "close" button.
#
# The "+" / "-" -buttons use an integer value as their ID which can be
# used to point to the index of the value to be changed. If the ID is
# negative it means subtract rather than add.
plus_buttons = HBox()
minus_buttons = HBox()
i = 1
for val in ycpbuiltins.foreach(values):
plus_buttons = ycpbuiltins.add(plus_buttons, HWeight(1, PushButton(Id(str(i)), "+")))
minus_buttons = ycpbuiltins.add(minus_buttons, HWeight(1, PushButton(Id(str(-i)), "-")))
i = i + 1
UI.OpenDialog(
VBox(
BarGraph(Id("bar"), values),
plus_buttons,
minus_buttons,
PushButton(Id("close"), Opt("default"), "&Close")
)
)
# Event processing loop - left only via the "close" button
# or the window manager close button / function.
button_id = None
while True:
button_id = UI.UserInput() # wait for button click
if button_id != "close" and button_id != "cancel":
sign = 1
button_id = int(button_id)
if int(button_id) < 0:
sign = -1
button_id = -button_id
# Loop over the values. Increment the value corresponding to the
# clicked button, decrement all others as to maintain the total
# sum of all values - or vice versa for negative button IDs
# (i.e. "-" buttons).
new_values = []
i2 = 0
while i2 < ycpbuiltins.size(values):
old_val = values[i2]
if (i2 + 1) == button_id:
new_values = ycpbuiltins.add(
new_values,
old_val + (sign * inc)
)
else:
new_values = ycpbuiltins.add(
new_values,
old_val + (-(sign) * (inc / (len(values) - 1) ))
)
i2 = i2 + 1
values = copy.deepcopy(new_values)
UI.ChangeWidget(Id("bar"), "Values", values)
if button_id == "close" or "button_id" == "cancel":
break;
UI.CloseDialog()
BarGraph3Client().main()
| gpl-2.0 |
wkeeling/ansible | test/units/modules/core/test_apt.py | 6 | 1505 | import collections
import os
import sys
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
try:
from ansible.modules.core.packaging.os.apt import (
expand_pkgspec_from_fnmatches,
)
except:
# Need some more module_utils work (porting urls.py) before we can test
# modules. So don't error out in this case.
if sys.version_info[0] >= 3:
pass
class AptExpandPkgspecTestCase(unittest.TestCase):
def setUp(self):
FakePackage = collections.namedtuple("Package", ("name",))
self.fake_cache = [ FakePackage("apt"),
FakePackage("apt-utils"),
FakePackage("not-selected"),
]
def test_trivial(self):
foo = ["apt"]
self.assertEqual(
expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
def test_version_wildcard(self):
foo = ["apt=1.0*"]
self.assertEqual(
expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
def test_pkgname_wildcard_version_wildcard(self):
foo = ["apt*=1.0*"]
m_mock = mock.Mock()
self.assertEqual(
expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
['apt', 'apt-utils'])
def test_pkgname_expands(self):
foo = ["apt*"]
m_mock = mock.Mock()
self.assertEqual(
expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
["apt", "apt-utils"])
| gpl-3.0 |
infobloxopen/neutron | neutron/tests/tempest/services/identity/v3/json/policy_client.py | 23 | 2468 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from neutron.tests.tempest.common import service_client
class PolicyClientJSON(service_client.ServiceClient):
api_version = "v3"
def create_policy(self, blob, type):
"""Creates a Policy."""
post_body = {
"blob": blob,
"type": type
}
post_body = json.dumps({'policy': post_body})
resp, body = self.post('policies', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['policy'])
def list_policies(self):
"""Lists the policies."""
resp, body = self.get('policies')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['policies'])
def get_policy(self, policy_id):
"""Lists out the given policy."""
url = 'policies/%s' % policy_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['policy'])
def update_policy(self, policy_id, **kwargs):
"""Updates a policy."""
type = kwargs.get('type')
post_body = {
'type': type
}
post_body = json.dumps({'policy': post_body})
url = 'policies/%s' % policy_id
resp, body = self.patch(url, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['policy'])
def delete_policy(self, policy_id):
"""Deletes the policy."""
url = "policies/%s" % policy_id
resp, body = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
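# Illustrative sketch (not part of the upstream client; the constructor
# arguments are assumptions): with an already-authenticated tempest
# auth_provider, typical usage would look roughly like:
#
#     client = PolicyClientJSON(auth_provider, 'identity', 'regionOne')
#     created = client.create_policy(blob='{"rules": []}', type='application/json')
#     fetched = client.get_policy(created['id'])
#     client.update_policy(created['id'], type='application/json')
#     client.delete_policy(created['id'])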
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/ddlspot.py | 1 | 5137 | # -*- coding: utf-8 -*-
'''
Eggman Add-on
**Created by Tempest**
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['www.ddlspot.com']
self.base_link = 'http://www.ddlspot.com/'
self.search_link = 'search/?q=%s&m=1&x=0&y=0'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (
data['tvshowtitle'], int(data['season']), int(data['episode'])) \
if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url).replace('-', '+')
r = client.request(url)
if r == None and 'tvshowtitle' in data:
season = re.search('S(.*?)E', hdlr)
season = season.group(1)
url = title
r = client.request(url)
for loopCount in range(0,2):
if loopCount == 1 or (r == None and 'tvshowtitle' in data):
r = client.request(url)
posts = client.parseDOM(r, "table", attrs={"class": "download"})
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
u = client.parseDOM(post, 'a', ret='href')
for i in u:
try:
name = str(i)
items.append(name)
except:
pass
except:
pass
if len(items) > 0: break
for item in items:
try:
info = []
i = str(item)
i = self.base_link + i
r = client.request(i)
u = client.parseDOM(r, "div", attrs={"class": "dl-links"})
for t in u:
r = re.compile('a href=".+?" rel=".+?">(.+?)<').findall(t)
for url in r:
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
quality, info = source_utils.get_release_quality(url)
valid, host = source_utils.is_host_valid(url, hostDict)
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return
def resolve(self, url):
return url
| gpl-2.0 |
huobaowangxi/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits does not make the fit much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
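# Illustrative sketch (not part of the test suite): the estimator exercised by
# these tests follows the usual scikit-learn pattern, e.g.:
#
#     X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
#     g = mixture.GMM(n_components=2, covariance_type='full', n_iter=100)
#     g.fit(X)
#     labels = g.predict(X)                 # hard cluster assignments
#     total_log_likelihood = g.score(X).sum()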
| bsd-3-clause |
RefugeeMatchmaking/HackZurich | GAE_Playground/libs/decorator.py | 6 | 16260 | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2015, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
__version__ = '4.0.4'
import re
import sys
import inspect
import operator
import itertools
import collections
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
getargspec = inspect.getargspec
def get_init(cls):
return cls.__init__.__func__
# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
'ArgSpec', 'args varargs varkw defaults')
def getargspec(f):
"""A replacement for inspect.getargspec"""
spec = getfullargspec(f)
return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = (
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1])
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % (next(self._compile_count),)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
def decorate(func, caller):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = func.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
fun = FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_decorate_'] = decorate
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return _decorate_(%s, _call_)' % fun,
evaldict, call=caller, doc=doc, module=caller.__module__,
__wrapped__=caller)
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
contextmanager = decorator(ContextManager)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.__mro__:
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).__mro__[1:]
else:
mro = t.__mro__
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
            A utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
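# Illustrative sketch (not part of the upstream module): the common entry point
# is decorator(), which turns a caller into a signature-preserving decorator:
#
#     @decorator
#     def trace(f, *args, **kw):
#         print('calling %s with args %s, %s' % (f.__name__, args, kw))
#         return f(*args, **kw)
#
#     @trace
#     def add(x, y):
#         return x + y
#
#     add(1, 2)                             # prints the call, then returns 3
#     add.__name__, getargspec(add).args    # ('add', ['x', 'y']) -- signature kept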
| mit |
mlskit/astromlskit | FRONTEND/doefront3.py | 3 | 2435 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'fractionalui.ui'
#
# Created: Wed Apr 08 07:10:07 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(240, 257)
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(40, 220, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 221, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(40, 190, 161, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.textEdit = QtGui.QTextEdit(Form)
self.textEdit.setGeometry(QtCore.QRect(10, 100, 221, 71))
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(10, 80, 181, 16))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.groupBox.setTitle(_translate("Form", "DOE Name", None))
self.lineEdit.setText(_translate("Form", "Fractionall Factorial", None))
self.pushButton_2.setText(_translate("Form", "Output Folder", None))
self.label.setText(_translate("Form", "Enter the generator string", None))
| gpl-3.0 |
bwbeach/ansible | lib/ansible/playbook/attribute.py | 7 | 1338 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Attribute:
def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, always_post_validate=False):
self.isa = isa
self.private = private
self.default = default
self.required = required
self.listof = listof
self.priority = priority
self.always_post_validate = always_post_validate
def __cmp__(self, other):
return cmp(other.priority, self.priority)
class FieldAttribute(Attribute):
pass
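# Illustrative sketch (not part of this file; names are hypothetical examples):
# elsewhere in the playbook code base, objects declare their keywords with
# FieldAttribute instances, e.g.:
#
#     _name        = FieldAttribute(isa='string', required=True)
#     _become      = FieldAttribute(isa='bool', default=False)
#     _environment = FieldAttribute(isa='list', listof=dict)
#
# and the shared Base class walks these declarations to build each object's
# set of valid attributes.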
| gpl-3.0 |
heisencoder/route-finder | third_party/lib/python/django/contrib/gis/geos/collections.py | 219 | 4667 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import c_int, c_uint, byref
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOS_PREPARE
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos import prototypes as capi
from django.utils.six.moves import xrange
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if not args:
raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid: self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join([g.kml for g in self])
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple([g.tuple for g in self])
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def merged(self):
"""
Returns a LineString representing the line merge of this
MultiLineString.
"""
return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
if GEOS_PREPARE:
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
else:
raise GEOSException('The cascaded union operation requires GEOS 3.1+.')
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
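# Illustrative sketch (assumes a working GEOS install): collections are built
# from existing geometries and behave like sequences, e.g.:
#
#     from django.contrib.gis.geos import MultiPoint, Point
#     mp = MultiPoint(Point(0, 0), Point(1, 1), srid=4326)
#     len(mp)        # 2
#     mp[0].tuple    # (0.0, 0.0)
#     mp.kml         # '<MultiGeometry><Point>...</Point>...</MultiGeometry>'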
| mit |
arivarton/workhours | stamp/config.py | 1 | 10075 | """Handling of config settings and files."""
import os
import sys
import re
from datetime import datetime
import yaml
from .constants import DATA_DIR, INVOICE_DIR
from .helpers import error_handler
from .exceptions import ConfigValueError
__all__ = ['Config']
class ConfigValue(object):
"""Template for setting values
    Values from the config file trump all.
    The environment variable name is the class name in uppercase, with an
    underscore inserted before each uppercase letter (except the first) and
    prefixed with 'STAMP_'. For example: 'STAMP_DATABASE_PATH'.
    If no environment variable with the correct name is found, a default value is used.
"""
def __init__(self):
self.env_variable_name = 'STAMP_' + re.sub('(?!^)(?=[A-Z])', '_',
self.__class__.__name__).upper()
self.env_variable_value = os.getenv(self.env_variable_name)
self.value = None
self.choices = None
self.validated = False
self.validation_type = None
self.type = str
self.add_default_value(self.env_variable_value)
def __str__(self):
return self.value
def validate(self):
if self.choices:
if self.value not in self.choices:
raise ConfigValueError('%s is not a valid choice! These are the valid choices: %s.'
% (self.value, ', '.join(self.choices)))
if self.validation_type == 'path':
if not os.path.exists(self.value):
try:
os.makedirs(self.value)
except PermissionError:
error_handler('Permission denied to create path %s!' % self.value)
if self.validation_type == 'email':
if re.match(r'^[\S]+@[\S]+.[A-Za-z]$', self.value) is None:
raise ConfigValueError('%s is not a valid mail address!' % self.value)
elif self.type is float:
if not isinstance(self.value, (float)):
raise ConfigValueError('%s is not a float!' % self.value)
elif self.type is int:
if not isinstance(self.value, (int)):
raise ConfigValueError('%s is not an integer!' % self.value)
elif self.type is str:
if not isinstance(self.value, (str)):
raise ConfigValueError('%s is not a string!' % self.value)
self.validated = True
def replace_value(self, value):
if value:
value = self.type(value)
self.value = value
def set_validation_type(self, val_type):
valid_choices = ('path', 'email')
if val_type in valid_choices:
self.validation_type = val_type
else:
raise AttributeError('%s is not a valid validation type!' % val_type)
    def add_default_value(self, default_value):
        # Never coerce a missing (None) default into the string 'None'.
        value = self.env_variable_value or default_value
        if value is not None:
            self.replace_value(value)
class Interface(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value('cli')
self.choices = ['cli', 'curses']
class DatabasePath(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value(DATA_DIR)
self.set_validation_type('path')
class LogoPath(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value(DATA_DIR)
self.set_validation_type('path')
class InvoicePath(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value(INVOICE_DIR)
self.set_validation_type('path')
class MinimumHours(ConfigValue):
def __init__(self):
super().__init__()
self.type = float
self.add_default_value(2.0)
class StandardHours(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value('08:00-16:00')
def validate(self):
if re.match(r'\d\d:\d\d-\d\d:\d\d', self.value) is None:
raise ConfigValueError('%s is the wrong format! The correct format is HH:MM-HH:MM.'
% self.value)
from_time, to_time = self.value.split('-')
try:
if datetime.strptime(from_time, '%H:%M') > datetime.strptime(to_time, '%H:%M'):
error_handler('From time is higher than to time!')
except ValueError:
raise ConfigValueError('Wrong use of 24 hour format!')
super().validate()
class LunchHours(ConfigValue):
def __init__(self):
super().__init__()
self.type = float
self.add_default_value(0.5)
class WagePerHour(ConfigValue):
def __init__(self):
super().__init__()
self.type = int
self.add_default_value(300)
class Currency(ConfigValue):
def __init__(self):
super().__init__()
self.add_default_value('NOK')
self.choices = ['NOK', 'ISK', 'USD']
class OrganizationNumber(ConfigValue):
def __init__(self):
super().__init__()
def validate(self):
try:
int(self.value.replace(' ', ''))
except ValueError:
            raise ConfigValueError('%s is not a valid organization number, it must contain only digits!' % self.value)
super().validate()
class CompanyName(ConfigValue):
def __init__(self):
super().__init__()
class CompanyAddress(ConfigValue):
def __init__(self):
super().__init__()
class CompanyZip(ConfigValue):
def __init__(self):
super().__init__()
def validate(self):
try:
int(self.value.replace(' ', ''))
except ValueError:
raise ConfigValueError('%s is not an integer!' % self.value)
super().validate()
class CompanyAccountNumber(ConfigValue):
def __init__(self):
super().__init__()
def validate(self):
try:
int(self.value.replace(' ', ''))
except ValueError:
raise ConfigValueError('%s is not an integer!' % self.value)
super().validate()
class PhoneNumber(ConfigValue):
def __init__(self):
super().__init__()
def validate(self):
try:
int(self.value.replace(' ', ''))
except ValueError:
raise ConfigValueError('%s is not an integer!' % self.value)
super().validate()
class MailAddress(ConfigValue):
def __init__(self):
super().__init__()
self.set_validation_type('email')
class ValuesWrapper(object):
def __repr__(self):
repr_string = '\n'
for key, value in self.__dict__.items():
repr_string += '%s: %s\n' % (str(key.replace('_', ' ')), str(value.value))
return repr_string
def add(self, value):
if issubclass(value.__class__, ConfigValue):
regex = re.compile('(?!^)(?=[A-Z])')
name = re.sub(regex, '_', value.__class__.__name__).lower()
setattr(self, name, value)
else:
raise ValueError('value argument must be a ConfigValue class!')
class Config(object):
def __init__(self, config_path):
self.config_path = config_path
self.values = ValuesWrapper()
self.file_exists = False
# Add values
self.values.add(Interface())
self.values.add(DatabasePath())
self.values.add(LogoPath())
self.values.add(InvoicePath())
self.values.add(MinimumHours())
self.values.add(StandardHours())
self.values.add(LunchHours())
self.values.add(WagePerHour())
self.values.add(Currency())
self.values.add(OrganizationNumber())
self.values.add(CompanyName())
self.values.add(CompanyAddress())
self.values.add(CompanyZip())
self.values.add(CompanyAccountNumber())
self.values.add(PhoneNumber())
self.values.add(MailAddress())
if config_path:
try:
self.load()
except ConfigValueError as err_msg:
error_handler(err_msg)
def validate_config_values(self):
for value in self.values.__dict__.values():
if value.value:
try:
value.validate()
except ConfigValueError as err_msg:
error_handler('Error when validating the %s option in config file!' % value.__class__.__name__, exit_on_error=False)
error_handler(err_msg)
def load(self):
"""Load config.
User defined config files will take precedence over the built in config.
"""
try:
with open(self.config_path, 'r') as config_readout:
try:
config = yaml.load(config_readout)
self.file_exists = True
except yaml.YAMLError as err:
print(err)
sys.exit(64)
except FileNotFoundError:
return None
for key, value in config.items():
key = key.replace(' ', '_')
if key not in self.values.__dict__.keys():
raise ConfigValueError('%s is not a valid config setting' % key)
else:
config_setting = self.values.__dict__[key]
config_setting.replace_value(value)
# Validate
if value:
try:
config_setting.validate()
except ConfigValueError as err_msg:
error_handler('Error when validating the %s option in config file!' % config_setting.__class__.__name__, exit_on_error=False)
error_handler(err_msg)
def write(self, value):
if not os.path.exists(os.path.dirname(self.config_path)):
os.makedirs(os.path.dirname(self.config_path))
with open(self.config_path, 'w') as config_readout:
try:
yaml.dump(value, config_readout, default_flow_style=False)
except yaml.YAMLError as err:
print(err)
sys.exit(64)
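# Illustrative sketch (hypothetical values and path, not part of this module):
# a setting such as WagePerHour can be overridden through the environment
# before Config() is built, or through the YAML file itself:
#
#     os.environ['STAMP_WAGE_PER_HOUR'] = '450'
#     config = Config(os.path.expanduser('~/.config/stamp/config.yml'))
#     config.validate_config_values()
#     config.values.wage_per_hour.value    # 450 (int), unless the YAML file
#                                          # contains e.g. "wage per hour: 500",
#                                          # which takes precedence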
| gpl-3.0 |
schlueter/ansible | lib/ansible/modules/network/avi/avi_authprofile.py | 41 | 5005 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_authprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of AuthProfile Avi RESTful Object
description:
- This module is used to configure AuthProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
http:
description:
- Http user authentication params.
ldap:
description:
- Ldap server and directory settings.
name:
description:
- Name of the auth profile.
required: true
saml:
description:
- Saml settings.
- Field introduced in 17.2.3.
version_added: "2.5"
tacacs_plus:
description:
- Tacacs+ settings.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of the auth profile.
- Enum options - AUTH_PROFILE_LDAP, AUTH_PROFILE_TACACS_PLUS, AUTH_PROFILE_SAML.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the auth profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create user authorization profile based on the LDAP
avi_authprofile:
controller: '{{ controller }}'
password: '{{ password }}'
username: '{{ username }}'
http:
cache_expiration_time: 5
group_member_is_full_dn: false
ldap:
base_dn: dc=avi,dc=local
bind_as_administrator: true
port: 389
security_mode: AUTH_LDAP_SECURE_NONE
server:
- 10.10.0.100
settings:
admin_bind_dn: [email protected]
group_filter: (objectClass=*)
group_member_attribute: member
group_member_is_full_dn: true
group_search_dn: dc=avi,dc=local
group_search_scope: AUTH_LDAP_SCOPE_SUBTREE
ignore_referrals: true
password: password
user_id_attribute: samAccountname
user_search_dn: dc=avi,dc=local
user_search_scope: AUTH_LDAP_SCOPE_ONE
name: ProdAuth
tenant_ref: admin
type: AUTH_PROFILE_LDAP
"""
RETURN = '''
obj:
description: AuthProfile (api/authprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
http=dict(type='dict',),
ldap=dict(type='dict',),
name=dict(type='str', required=True),
saml=dict(type='dict',),
tacacs_plus=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'authprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
inasafe/inasafe | safe/common/custom_logging.py | 3 | 7256 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Logging related code.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
import os
import socket
import sys
from osgeo import gdal
# This is ugly but we dont have a better solution yet...
safe_extras_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'safe_extras'))
if safe_extras_dir not in sys.path:
sys.path.append(safe_extras_dir)
# We add "# NOQA" because imports are not done at top of file.
from qgis.core import QgsMessageLog # NOQA
from qgis.PyQt.QtCore import QT_VERSION_STR, QSettings # NOQA We can't move to
# our settings class.
from raven.handlers.logging import SentryHandler # NOQA
from safe.common.utilities import log_file_path # NOQA
from safe.common.version import get_version # NOQA
from safe.definitions.provenance import ( # NOQA
provenance_gdal_version, # NOQA
provenance_os, # NOQA
provenance_qgis_version, # NOQA
provenance_qt_version, # NOQA
) # NOQA
from safe.definitions.sentry import PRODUCTION_SERVER # NOQA
from safe.utilities.i18n import tr # NOQA
from safe.utilities.gis import qgis_version_detailed # NOQA
from safe.utilities.utilities import readable_os_version # NOQA
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
class QgsLogHandler(logging.Handler):
"""A logging handler that will log messages to the QGIS logging console."""
def __init__(self, level=logging.NOTSET):
logging.Handler.__init__(self)
def emit(self, record):
"""Try to log the message to QGIS if available, otherwise do nothing.
:param record: logging record containing whatever info needs to be
logged.
"""
try:
# Check logging.LogRecord properties for lots of other goodies
# like line number etc. you can get from the log message.
QgsMessageLog.logMessage(record.getMessage(), 'InaSAFE', 0)
except MemoryError:
message = tr(
'Due to memory limitations on this machine, InaSAFE can not '
'handle the full log')
print(message)
QgsMessageLog.logMessage(message, 'InaSAFE', 0)
def add_logging_handler_once(logger, handler):
"""A helper to add a handler to a logger, ensuring there are no duplicates.
:param logger: Logger that should have a handler added.
:type logger: logging.logger
:param handler: Handler instance to be added. It will not be added if an
instance of that Handler subclass already exists.
:type handler: logging.Handler
:returns: True if the logging handler was added, otherwise False.
:rtype: bool
"""
class_name = handler.__class__.__name__
for logger_handler in logger.handlers:
if logger_handler.__class__.__name__ == class_name:
return False
logger.addHandler(handler)
return True
def setup_logger(logger_name, log_file=None, sentry_url=None):
"""Run once when the module is loaded and enable logging.
:param logger_name: The logger name that we want to set up.
:type logger_name: str
:param log_file: Optional full path to a file to write logs to.
:type log_file: str
:param sentry_url: Optional url to sentry api for remote
logging. Defaults to URL defined in safe.definitions.sentry.py
which is the sentry project for InaSAFE desktop.
:type sentry_url: str
Borrowed heavily from this:
http://docs.python.org/howto/logging-cookbook.html
Now to log a message do::
LOGGER.debug('Some debug message')
.. note:: The file logs are written to the inasafe user tmp dir e.g.:
/tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
"""
logger = logging.getLogger(logger_name)
logging_level = int(os.environ.get('INASAFE_LOGGING_LEVEL', logging.DEBUG))
logger.setLevel(logging_level)
default_handler_level = logging_level
# create formatter that will be added to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create syslog handler which logs even debug messages
# (ariel): Make this log to /var/log/safe.log instead of
# /var/log/syslog
# (Tim) Ole and I discussed this - we prefer to log into the
# user's temporary working directory.
inasafe_log_path = log_file_path()
if log_file is None:
file_handler = logging.FileHandler(inasafe_log_path)
else:
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(default_handler_level)
file_handler.setFormatter(formatter)
add_logging_handler_once(logger, file_handler)
if 'MUTE_LOGS' not in os.environ:
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
add_logging_handler_once(logger, console_handler)
# create a QGIS handler
qgis_handler = QgsLogHandler()
qgis_handler.setFormatter(formatter)
add_logging_handler_once(logger, qgis_handler)
# Sentry handler - this is optional hence the localised import
# If raven is available logging messages will be sent to
# http://sentry.kartoza.com
# We will log exceptions only there. To enable it you need to either:
# * set the environment variable 'INASAFE_SENTRY' (any value will do), or
# * enable Sentry in QSettings ('inasafe/useSentry')
qsettings_flag = QSettings().value('inasafe/useSentry', False, type=bool)
environment_flag = 'INASAFE_SENTRY' in os.environ
if environment_flag or qsettings_flag:
if sentry_url is None:
sentry_url = PRODUCTION_SERVER
tags = dict()
tags[provenance_gdal_version['provenance_key']] = gdal.__version__
tags[provenance_os['provenance_key']] = readable_os_version()
qgis_short_version = provenance_qgis_version['provenance_key']
qgis_full_version = qgis_short_version + '_full'
versions = [str(v) for v in qgis_version_detailed()]
tags[qgis_short_version] = '.'.join(versions[0:2])
tags[qgis_full_version] = '.'.join(versions[0:3])
tags[provenance_qt_version['provenance_key']] = QT_VERSION_STR
hostname = os.environ.get('HOSTNAME_SENTRY', socket.gethostname())
sentry_handler = SentryHandler(
dsn=sentry_url,
name=hostname,
release=get_version(),
tags=tags,
)
sentry_handler.setFormatter(formatter)
sentry_handler.setLevel(logging.ERROR)
if add_logging_handler_once(logger, sentry_handler):
logger.debug('Sentry logging enabled in safe')
else:
logger.debug('Sentry logging disabled in safe')
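# Illustrative sketch (hypothetical helper, not part of the original module):
# enabling the optional Sentry handler via the environment variable before
# setting up logging. The log file path below is a placeholder.
def _setup_logger_sketch():
    os.environ['INASAFE_SENTRY'] = '1'  # any value enables the Sentry handler
    setup_logger('InaSAFE', log_file='/tmp/inasafe-demo.log')
    logging.getLogger('InaSAFE').debug('Logging initialised')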
| gpl-3.0 |
Workday/OpenFrame | tools/telemetry/telemetry/page/shared_page_state_unittest.py | 1 | 4249 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tempfile
import unittest
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.util import wpr_modes
def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(page_test.PageTest):
def ValidateAndMeasurePage(self, *_):
pass
class FakeNetworkController(object):
def __init__(self):
self.archive_path = None
self.wpr_mode = None
def SetReplayArgs(self, archive_path, wpr_mode, _netsim, _extra_wpr_args,
_make_javascript_deterministic=False):
self.archive_path = archive_path
self.wpr_mode = wpr_mode
class SharedPageStateTests(unittest.TestCase):
def setUp(self):
self.options = fakes.CreateBrowserFinderOptions()
self.options.use_live_sites = False
self.options.output_formats = ['none']
self.options.suppress_gtest_report = True
# pylint: disable=protected-access
def TestUseLiveSitesFlag(self, expected_wpr_mode):
with tempfile.NamedTemporaryFile() as f:
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
fake_network_controller = FakeNetworkController()
run_state._PrepareWpr(fake_network_controller, f.name, None)
self.assertEquals(fake_network_controller.wpr_mode, expected_wpr_mode)
self.assertEquals(fake_network_controller.archive_path, f.name)
def testUseLiveSitesFlagSet(self):
self.options.use_live_sites = True
self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_OFF)
def testUseLiveSitesFlagUnset(self):
self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_REPLAY)
def testConstructorCallsSetOptions(self):
test = DummyTest()
shared_page_state.SharedPageState(
test, self.options, story_module.StorySet())
self.assertEqual(test.options, self.options)
def assertUserAgentSetCorrectly(
self, shared_page_state_class, expected_user_agent):
story = page.Page(
'http://www.google.com',
shared_page_state_class=shared_page_state_class)
test = DummyTest()
story_set = story_module.StorySet()
story_set.AddStory(story)
story.shared_state_class(test, self.options, story_set)
browser_options = self.options.browser_options
actual_user_agent = browser_options.browser_user_agent_type
self.assertEqual(expected_user_agent, actual_user_agent)
def testPageStatesUserAgentType(self):
self.assertUserAgentSetCorrectly(
shared_page_state.SharedMobilePageState, 'mobile')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedDesktopPageState, 'desktop')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedTabletPageState, 'tablet')
self.assertUserAgentSetCorrectly(
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
def testBrowserStartupURLSetCorrectly(self):
story_set = story_module.StorySet()
google_page = page.Page(
'http://www.google.com',
startup_url='http://www.google.com', page_set=story_set)
example_page = page.Page(
'https://www.example.com',
startup_url='https://www.example.com', page_set=story_set)
gmail_page = page.Page(
'https://www.gmail.com',
startup_url='https://www.gmail.com', page_set=story_set)
for p in (google_page, example_page, gmail_page):
story_set.AddStory(p)
shared_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_set)
for p in (google_page, example_page, gmail_page):
shared_state.WillRunStory(p)
self.assertEquals(
p.startup_url, self.options.browser_options.startup_url)
| bsd-3-clause |
roadmapper/ansible | lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py | 2 | 11331 | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_loganalyticsworkspace
version_added: "2.8"
short_description: Manage Azure Log Analytics workspaces
description:
- Create, delete Azure Log Analytics workspaces.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the workspace.
required: true
state:
description:
- Assert the state of the image. Use C(present) to create or update an image and C(absent) to delete an image.
default: present
choices:
- absent
- present
location:
description:
- Resource location.
sku:
description:
- The SKU of the workspace.
choices:
- free
- standard
- premium
- unlimited
- per_node
- per_gb2018
- standalone
default: per_gb2018
retention_in_days:
description:
- The workspace data retention in days.
- -1 means Unlimited retention for I(sku=unlimited).
- 730 days is the maximum allowed for all other SKUs.
intelligence_packs:
description:
- Manage intelligence packs possible for this workspace.
- Enable one pack by setting it to C(true). For example "Backup:true".
- Disable one pack by setting it to C(false). For example "Backup:false".
- Other intelligence packs not list in this property will not be changed.
type: dict
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create a workspace with backup enabled
azure_rm_loganalyticsworkspace:
resource_group: myResourceGroup
name: myLogAnalyticsWorkspace
intelligence_packs:
Backup: true
'''
RETURN = '''
id:
description:
- Workspace resource path.
type: str
returned: success
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m
yLogAnalyticsWorkspace"
location:
description:
- Resource location.
type: str
returned: success
example: eastus
sku:
description:
- The SKU of the workspace.
type: str
returned: success
example: "per_gb2018"
retention_in_days:
description:
- The workspace data retention in days.
- -1 means Unlimited retention for I(sku=unlimited).
- 730 days is the maximum allowed for all other SKUs.
type: int
returned: success
example: 40
intelligence_packs:
description:
- Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
type: list
returned: success
example: [{'name': 'CapacityPerformance', 'enabled': true}]
management_groups:
description:
- Management groups connected to the workspace.
type: dict
returned: success
example: {'value': []}
shared_keys:
description:
- Shared keys for the workspace.
type: dict
returned: success
example: {
'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
}
usages:
description:
- Usage metrics for the workspace.
type: dict
returned: success
example: {
'value': [
{
'name': {
'value': 'DataAnalyzed',
'localizedValue': 'Data Analyzed'
},
'unit': 'Bytes',
'currentValue': 0,
'limit': 524288000,
'nextResetTime': '2017-10-03T00:00:00Z',
'quotaPeriod': 'P1D'
}
]
}
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLogAnalyticsWorkspace(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
sku=dict(type='str', default='per_gb2018', choices=['free', 'standard', 'premium', 'unlimited', 'per_node', 'per_gb2018', 'standalone']),
retention_in_days=dict(type='int'),
intelligence_packs=dict(type='dict')
)
self.results = dict(
changed=False,
id=None
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.sku = None
self.retention_in_days = None
self.intelligence_packs = None
super(AzureRMLogAnalyticsWorkspace, self).__init__(self.module_arg_spec, supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
self.results = dict()
changed = False
if not self.location:
resource_group = self.get_resource_group(self.resource_group)
self.location = resource_group.location
if self.sku == 'per_gb2018':
self.sku = 'PerGB2018'
else:
self.sku = _snake_to_camel(self.sku)
workspace = self.get_workspace()
if not workspace and self.state == 'present':
changed = True
workspace = self.log_analytics_models.Workspace(sku=self.log_analytics_models.Sku(name=self.sku),
retention_in_days=self.retention_in_days,
location=self.location)
if not self.check_mode:
workspace = self.create_workspace(workspace)
elif workspace and self.state == 'absent':
changed = True
workspace = None
if not self.check_mode:
self.delete_workspace()
if workspace and workspace.id:
self.results = self.to_dict(workspace)
self.results['intelligence_packs'] = self.list_intelligence_packs()
self.results['management_groups'] = self.list_management_groups()
self.results['usages'] = self.list_usages()
self.results['shared_keys'] = self.get_shared_keys()
# handle the intelligence pack
if workspace and workspace.id and self.intelligence_packs:
intelligence_packs = self.results['intelligence_packs']
for key in self.intelligence_packs.keys():
enabled = self.intelligence_packs[key]
for x in intelligence_packs:
if x['name'].lower() == key.lower():
if x['enabled'] != enabled:
changed = True
if not self.check_mode:
self.change_intelligence(x['name'], enabled)
x['enabled'] = enabled
break
self.results['changed'] = changed
return self.results
def create_workspace(self, workspace):
try:
poller = self.log_analytics_client.workspaces.create_or_update(self.resource_group, self.name, workspace)
return self.get_poller_result(poller)
except CloudError as exc:
self.fail('Error when creating workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
def get_workspace(self):
try:
return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
except CloudError:
pass
def delete_workspace(self):
try:
self.log_analytics_client.workspaces.delete(self.resource_group, self.name)
except CloudError as exc:
self.fail('Error when deleting workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
def to_dict(self, workspace):
result = workspace.as_dict()
result['sku'] = _camel_to_snake(workspace.sku.name)
return result
def list_intelligence_packs(self):
try:
response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
return [x.as_dict() for x in response]
except CloudError as exc:
self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
def change_intelligence(self, key, value):
try:
if value:
self.log_analytics_client.workspaces.enable_intelligence_pack(self.resource_group, self.name, key)
else:
self.log_analytics_client.workspaces.disable_intelligence_pack(self.resource_group, self.name, key)
except CloudError as exc:
self.fail('Error when changing intelligence pack {0} - {1}'.format(key, exc.message or str(exc)))
def list_management_groups(self):
result = []
try:
response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
while True:
result.append(response.next().as_dict())
except StopIteration:
pass
except CloudError as exc:
self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
return result
def list_usages(self):
result = []
try:
response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
while True:
result.append(response.next().as_dict())
except StopIteration:
pass
except CloudError as exc:
self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
return result
def get_shared_keys(self):
try:
return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
except CloudError as exc:
self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
def main():
AzureRMLogAnalyticsWorkspace()
if __name__ == '__main__':
main()
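# Illustrative (hedged) companion to the EXAMPLES block above, mirroring the
# documented options; the resource names are placeholders:
#
# - name: Remove the workspace
#   azure_rm_loganalyticsworkspace:
#     resource_group: myResourceGroup
#     name: myLogAnalyticsWorkspace
#     state: absent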
| gpl-3.0 |
tima/ansible | lib/ansible/module_utils/network/f5/bigiq.py | 2 | 1361 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from f5.bigiq import ManagementRoot
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from library.module_utils.network.f5.common import F5BaseClient
from library.module_utils.network.f5.common import F5ModuleError
except ImportError:
from ansible.module_utils.network.f5.common import F5BaseClient
from ansible.module_utils.network.f5.common import F5ModuleError
class F5Client(F5BaseClient):
@property
def api(self):
try:
result = ManagementRoot(
self.params['server'],
self.params['user'],
self.params['password'],
port=self.params['server_port'],
verify=self.params['validate_certs'],
token='local'
)
except Exception:
raise F5ModuleError(
'Unable to connect to {0} on port {1}. '
'Is "validate_certs" preventing this?'.format(self.params['server'], self.params['server_port'])
)
return result
| gpl-3.0 |
raags/ansible-modules-core | cloud/openstack/_quantum_router_gateway.py | 58 | 7565 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router_gateway
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate an external network with a router to route external traffic.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the gateway should be attached.
required: true
default: None
network_name:
description:
- Name of the external network which should be attached to the router.
required: true
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Attach an external network with a router to allow flow of external traffic
- quantum_router_gateway: state=present login_username=admin login_password=admin
login_tenant_name=admin router_name=external_router
network_name=external_network
'''
_os_keystone = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_net_id(neutron, module):
kwargs = {
'name': module.params['network_name'],
'router:external': True
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_port_id(neutron, module, router_id, network_id):
kwargs = {
'device_id': router_id,
'network_id': network_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _add_gateway_router(neutron, module, router_id, network_id):
kwargs = {
'network_id': network_id
}
try:
neutron.add_gateway_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg = "Error in adding gateway to router: %s" % e.message)
return True
def _remove_gateway_router(neutron, module, router_id):
try:
neutron.remove_gateway_router(router_id)
except Exception, e:
module.fail_json(msg = "Error in removing gateway to router: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
router_name = dict(required=True),
network_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
network_id = _get_net_id(neutron, module)
if not network_id:
module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
_add_gateway_router(neutron, module, router_id, network_id)
module.exit_json(changed=True, result="created")
module.exit_json(changed=False, result="success")
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
module.exit_json(changed=False, result="Success")
_remove_gateway_router(neutron, module, router_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/numpy/polynomial/hermite.py | 36 | 55853 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol):
"""
poly2herm(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herm2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
array([ 1. , 2.75 , 0.5 , 0.375])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermadd(hermmulx(res), pol[i])
return res
def herm2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
c[1] *= 2
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(2*(i - 1)))
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1)*2)
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermdomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite import hermline, hermval
>>> hermval(0,hermline(3, 2))
3.0
>>> hermval(1,hermline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl/2])
else:
return np.array([off])
def hermfromroots(roots):
"""
Generate a Hermite series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Hermite form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Hermite form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, chebfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.hermite import hermfromroots, hermval
>>> coef = hermfromroots((-1, 0, 1))
>>> hermval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermfromroots((-1j, 1j))
>>> hermval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermsub, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermsub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermadd, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermsub
>>> hermsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermmulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([ 2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]/2
for i in range(1, len(c)):
prd[i + 1] = c[i]/2
prd[i - 1] += c[i]*i
return prd
def hermmul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermadd, hermsub, hermdiv, hermpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermmul
>>> hermmul([1, 2, 3], [0, 1, 2])
array([ 52., 29., 52., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
c1 = hermadd(tmp, hermmulx(c1)*2)
return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermadd, hermsub, hermmul, hermpow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermdiv
>>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 2., 2.]))
>>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermadd, hermsub, hermmul, hermdiv
Examples
--------
>>> from numpy.polynomial.hermite import hermpow
>>> hermpow([1, 2, 3], 2)
array([ 81., 52., 82., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermmul(prd, c)
return prd
def hermder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite series.
Returns the Hermite series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If `c` is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite import hermder
>>> hermder([ 1. , 0.5, 0.5, 0.5])
array([ 1., 2., 3.])
>>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite series.
Returns the Hermite series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermint
>>> hermint([1,2,3]) # integrate once, value 0 at 0.
array([ 1. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ])
>>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
array([ 2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
array([-2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]/2
for j in range(1, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermval(x, c, tensor=True):
"""
Evaluate an Hermite series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermval2d, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite import hermval
>>> coef = [1,2,3]
>>> hermval(1, coef)
11.0
>>> hermval([[1,2],[3,4]], coef)
array([[ 11., 51.],
[ 115., 203.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
x2 = x*2
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(2*(nd - 1))
c1 = tmp + c1*x2
return c0 + c1*x2
def hermval2d(x, y, c):
"""
Evaluate a 2-D Hermite series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermval, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
return c
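# Illustrative usage sketch (hypothetical helper, not part of the original
# module), assuming the physicists' Hermite basis H_0 = 1, H_1(t) = 2*t.
def _hermval2d_usage_sketch():
    # p(x, y) = 1*H_0(x)*H_0(y) + 2*H_0(x)*H_1(y) + 3*H_1(x)*H_0(y) + 4*H_1(x)*H_1(y)
    #         = 1 + 4*y + 6*x + 16*x*y
    c = np.array([[1., 2.], [3., 4.]])
    return hermval2d(1.0, 1.0, c)  # -> 27.0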
def hermgrid2d(x, y, c):
"""
Evaluate a 2-D Hermite series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermval, hermval2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
return c
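# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the same coefficients as the hermval2d sketch above, evaluated on a
# Cartesian product of points.
def _hermgrid2d_usage_sketch():
    # p(x, y) = 1 + 4*y + 6*x + 16*x*y
    c = np.array([[1., 2.], [3., 4.]])
    grid = hermgrid2d([0.0, 1.0], [0.0, 1.0, 2.0], c)
    # grid.shape == (2, 3); grid[1, 1] == p(1, 1) == 27.0
    return grid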
def hermval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
c = hermval(z, c, tensor=False)
return c
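# Illustrative usage sketch (hypothetical helper, not part of the original
# module): with all-ones coefficients of shape (2, 2, 2) the series factors as
# (1 + 2*x)*(1 + 2*y)*(1 + 2*z).
def _hermval3d_usage_sketch():
    c = np.ones((2, 2, 2))
    return hermval3d(1.0, 1.0, 1.0, c)  # -> 27.0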
def hermgrid3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
c = hermval(z, c)
return c
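# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the same all-ones coefficients as the hermval3d sketch, evaluated on
# a Cartesian product grid.
def _hermgrid3d_usage_sketch():
    c = np.ones((2, 2, 2))
    grid = hermgrid3d([0.0, 1.0], [0.0, 1.0], [0.0, 1.0], c)
    # grid.shape == (2, 2, 2); grid[0, 0, 0] == 1.0, grid[1, 1, 1] == 27.0
    return grid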
def hermvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = H_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Hermite polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
``hermval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Hermite series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite import hermvander
>>> x = np.array([-1, 0, 1])
>>> hermvander(x, 3)
array([[ 1., -2., 2., 4.],
[ 1., 0., -2., -0.],
[ 1., 2., 2., -4.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
x2 = x*2
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
return np.rollaxis(v, 0, v.ndim)
def hermvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Hermite polynomials.
If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
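# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the documented equivalence between the pseudo-Vandermonde matrix and
# direct evaluation with hermval2d.
def _hermvander2d_usage_sketch():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.5, -1.0, 3.0])
    c = np.array([[1., 2.], [3., 4.]])
    V = hermvander2d(x, y, [1, 1])  # shape (3, 4)
    return np.allclose(np.dot(V, c.flat), hermval2d(x, y, c))  # True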
def hermvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Hermite polynomials.
If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermvander, hermvander2d, hermval2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
vz = hermvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
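# Illustrative sketch, not part of the original NumPy source: the same consistency check in
# three dimensions; the matrix has (l + 1)*(m + 1)*(n + 1) columns for degrees [l, m, n].
# The helper name is hypothetical.
def _demo_hermvander3d_equivalence():
    import numpy as np
    from numpy.polynomial.hermite import hermvander3d, hermval3d
    x = y = z = np.linspace(-1.0, 1.0, 4)
    c = np.ones((2, 3, 4))                # degrees [1, 2, 3]
    V = hermvander3d(x, y, z, [1, 2, 3])
    assert V.shape == (4, 2 * 3 * 4)
    assert np.allclose(V.dot(c.ravel()), hermval3d(x, y, z, c))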
def hermfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a Hermite series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, polyfit, hermefit
hermval : Evaluates a Hermite series.
hermvander : Vandermonde matrix of Hermite series.
hermweight : Hermite weight function
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Hermite series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Hermite series are probably most useful when the data can be
approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.hermite import hermfit, hermval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermval(x, [1, 2, 3]) + err
>>> hermfit(x, y, 2)
array([ 0.97902637, 1.99849131, 3.00006 ])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
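# Illustrative sketch, not part of the original NumPy source: a weighted fit in the style
# recommended by the Notes above, passing sqrt(hermweight(x)) as the weights and dividing
# the data by the same factor. The helper name and the synthetic data are hypothetical.
def _demo_weighted_hermfit():
    import numpy as np
    from numpy.polynomial.hermite import hermfit, hermval, hermweight
    x = np.linspace(-3.0, 3.0, 200)
    true_coef = [1.0, 2.0, 3.0]
    w = np.sqrt(hermweight(x))
    y = w * hermval(x, true_coef)         # data of the form sqrt(w(x)) * p(x)
    coef = hermfit(x, y / w, 2, w=w)
    assert np.allclose(coef, true_coef)
    return coef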
def hermcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an Hermite basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-.5*c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., np.sqrt(2.*np.arange(1, n))))
scl = np.multiply.accumulate(scl)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(.5*np.arange(1, n))
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
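# Illustrative sketch, not part of the original NumPy source: for a pure basis polynomial
# the scaled companion matrix is symmetric, so eigvalsh can be used and its eigenvalues
# agree with hermroots. The helper name is hypothetical.
def _demo_hermcompanion_eigenvalues():
    import numpy as np
    from numpy.polynomial.hermite import hermcompanion, hermroots
    basis = np.array([0.0, 0.0, 0.0, 1.0])    # coefficients of H_3
    m = hermcompanion(basis)
    assert np.allclose(m, m.T)                # symmetric for a basis polynomial
    assert np.allclose(np.linalg.eigvalsh(m), hermroots(basis))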
def hermroots(c):
"""
Compute the roots of a Hermite series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * H_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, chebroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Hermite series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite import hermroots, hermfromroots
>>> coef = hermfromroots([-1, 0, 1])
>>> coef
array([ 0. , 0.25 , 0. , 0.125])
>>> hermroots(coef)
array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-.5*c[0]/c[1]])
m = hermcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\inf, \inf]`
with the weight function :math:`f(x) = \exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermcompanion(c)
x = la.eigvals(m)
x.sort()
# improve roots by one application of Newton
dy = hermval(x, c)
df = hermval(x, hermder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = hermval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Hermite we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(np.pi) / w.sum()
return x, w
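# Illustrative sketch, not part of the original NumPy source: a 5-point rule integrates
# polynomials of degree <= 9 exactly against exp(-x**2); for example the integral of
# x**2 * exp(-x**2) over the real line is sqrt(pi)/2. The helper name is hypothetical.
def _demo_hermgauss_quadrature():
    import numpy as np
    from numpy.polynomial.hermite import hermgauss
    x, w = hermgauss(5)
    assert np.allclose(np.sum(w * x**2), np.sqrt(np.pi) / 2)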
def hermweight(x):
"""
Weight function of the Hermite polynomials.
The weight function is :math:`\exp(-x^2)` and the interval of
    integration is :math:`[-\inf, \inf]`. The Hermite polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-x**2)
return w
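# Illustrative sketch, not part of the original NumPy source: the orthogonality mentioned
# above can be checked numerically by combining hermgauss with hermval; the quadrature
# already carries the exp(-x**2) weight. The helper name is hypothetical.
def _demo_hermite_orthogonality():
    import numpy as np
    from numpy.polynomial.hermite import hermgauss, hermval
    x, w = hermgauss(20)
    h1 = hermval(x, [0, 1])                   # H_1(x) = 2x
    h2 = hermval(x, [0, 0, 1])                # H_2(x) = 4x**2 - 2
    assert abs(np.sum(w * h1 * h2)) < 1e-10   # orthogonal under the weight
    assert np.allclose(np.sum(w * h1 * h1), 2 * np.sqrt(np.pi))   # 2**n * n! * sqrt(pi) for n = 1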
#
# Hermite series class
#
class Hermite(ABCPolyBase):
"""An Hermite series class.
The Hermite class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        Hermite coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermadd)
_sub = staticmethod(hermsub)
_mul = staticmethod(hermmul)
_div = staticmethod(hermdiv)
_pow = staticmethod(hermpow)
_val = staticmethod(hermval)
_int = staticmethod(hermint)
_der = staticmethod(hermder)
_fit = staticmethod(hermfit)
_line = staticmethod(hermline)
_roots = staticmethod(hermroots)
_fromroots = staticmethod(hermfromroots)
# Virtual properties
nickname = 'herm'
domain = np.array(hermdomain)
window = np.array(hermdomain)
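# Illustrative sketch, not part of the original NumPy source: the class wraps the module
# functions above, so fitting and evaluation work without handling coefficient arrays
# directly. The helper name and the synthetic data are hypothetical.
def _demo_hermite_class():
    import numpy as np
    from numpy.polynomial import Hermite
    x = np.linspace(-1.0, 1.0, 50)
    y = x**2
    series = Hermite.fit(x, y, deg=2)         # same engine as hermfit, plus domain mapping
    assert np.allclose(series(x), y)
    return series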
| mit |
ptisserand/ansible | contrib/inventory/nova.py | 109 | 7029 | #!/usr/bin/env python
# (c) 2012, Marco Vito Moscaritolo <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WARNING: This file is deprecated. New work should focus on the openstack.py
# inventory module, which properly handles multiple clouds as well as keystone
# v3 and keystone auth plugins
import sys
import re
import os
import ConfigParser
from novaclient import client as nova_client
from six import iteritems, itervalues
try:
import json
except ImportError:
import simplejson as json
sys.stderr.write("WARNING: this inventory module is deprecated. please migrate usage to openstack.py\n")
###################################################
# executed with no parameters, return the list of
# all groups and hosts
NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
"/etc/ansible/nova.ini"]
NOVA_DEFAULTS = {
'auth_system': None,
'region_name': None,
'service_type': 'compute',
}
def nova_load_config_file():
p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
for path in NOVA_CONFIG_FILES:
if os.path.exists(path):
p.read(path)
return p
return None
def get_fallback(config, value, section="openstack"):
"""
Get value from config object and return the value
or false
"""
try:
return config.get(section, value)
except ConfigParser.NoOptionError:
return False
def push(data, key, element):
"""
    Assist in adding items to a dictionary of lists
"""
if (not element) or (not key):
return
if key in data:
data[key].append(element)
else:
data[key] = [element]
def to_safe(word):
'''
Converts 'bad' characters in a string to underscores so they can
be used as Ansible groups
'''
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
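# Illustrative sketch, not part of the original inventory script: how push() and to_safe()
# combine when turning instance metadata into inventory groups. The values shown are
# hypothetical.
def _demo_group_building():
    groups = {}
    key = to_safe('tag_env_prod backend')      # characters outside [A-Za-z0-9-] become '_'
    push(groups, key, '10.0.0.5')
    push(groups, key, '10.0.0.6')
    return groups                              # {'tag_env_prod_backend': ['10.0.0.5', '10.0.0.6']}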
def get_ips(server, access_ip=True):
"""
Returns a list of the server's IPs, or the preferred
access IP
"""
private = []
public = []
address_list = []
# Iterate through each servers network(s), get addresses and get type
addresses = getattr(server, 'addresses', {})
if len(addresses) > 0:
for network in itervalues(addresses):
for address in network:
if address.get('OS-EXT-IPS:type', False) == 'fixed':
private.append(address['addr'])
elif address.get('OS-EXT-IPS:type', False) == 'floating':
public.append(address['addr'])
if not access_ip:
address_list.append(server.accessIPv4)
address_list.extend(private)
address_list.extend(public)
return address_list
access_ip = None
# Append group to list
if server.accessIPv4:
access_ip = server.accessIPv4
if (not access_ip) and public and not (private and prefer_private):
access_ip = public[0]
if private and not access_ip:
access_ip = private[0]
return access_ip
def get_metadata(server):
"""Returns dictionary of all host metadata"""
get_ips(server, False)
results = {}
for key in vars(server):
# Extract value
value = getattr(server, key)
# Generate sanitized key
key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
        # Add value to instance result (exclude manager class)
# TODO: maybe use value.__class__ or similar inside of key_name
if key != 'os_manager':
results[key] = value
return results
config = nova_load_config_file()
if not config:
sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
# Load up connections info based on config and then environment
# variables
username = (get_fallback(config, 'username') or
os.environ.get('OS_USERNAME', None))
api_key = (get_fallback(config, 'api_key') or
os.environ.get('OS_PASSWORD', None))
auth_url = (get_fallback(config, 'auth_url') or
os.environ.get('OS_AUTH_URL', None))
project_id = (get_fallback(config, 'project_id') or
os.environ.get('OS_TENANT_NAME', None))
region_name = (get_fallback(config, 'region_name') or
os.environ.get('OS_REGION_NAME', None))
auth_system = (get_fallback(config, 'auth_system') or
os.environ.get('OS_AUTH_SYSTEM', None))
# Determine what type of IP is preferred to return
prefer_private = False
try:
prefer_private = config.getboolean('openstack', 'prefer_private')
except ConfigParser.NoOptionError:
pass
client = nova_client.Client(
version=config.get('openstack', 'version'),
username=username,
api_key=api_key,
auth_url=auth_url,
region_name=region_name,
project_id=project_id,
auth_system=auth_system,
service_type=config.get('openstack', 'service_type'),
)
# Default or added list option
if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
groups = {'_meta': {'hostvars': {}}}
# Cycle on servers
for server in client.servers.list():
access_ip = get_ips(server)
# Push to name group of 1
push(groups, server.name, access_ip)
# Run through each metadata item and add instance to it
for key, value in iteritems(server.metadata):
composed_key = to_safe('tag_{0}_{1}'.format(key, value))
push(groups, composed_key, access_ip)
# Do special handling of group for backwards compat
# inventory groups
group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
push(groups, group, access_ip)
# Add vars to _meta key for performance optimization in
# Ansible 1.3+
groups['_meta']['hostvars'][access_ip] = get_metadata(server)
# Return server list
print(json.dumps(groups, sort_keys=True, indent=2))
sys.exit(0)
#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
results = {}
ips = []
for server in client.servers.list():
if sys.argv[2] in (get_ips(server) or []):
results = get_metadata(server)
print(json.dumps(results, sort_keys=True, indent=2))
sys.exit(0)
else:
sys.exit("usage: --list ..OR.. --host <hostname>")
| gpl-3.0 |
edmorley/django | tests/serializers/test_xml.py | 44 | 4779 | from xml.dom import minidom
from django.core import serializers
from django.core.serializers.xml_serializer import DTDForbidden
from django.test import TestCase, TransactionTestCase
from .tests import SerializersTestBase, SerializersTransactionTestBase
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>""" # NOQA
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return str(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1])
)
def test_no_dtd(self):
"""
The XML deserializer shouldn't allow a DTD.
This is the most straightforward way to prevent all entity definitions
and avoid both external entities and entity-expansion attacks.
"""
xml = '<?xml version="1.0" standalone="no"?><!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
with self.assertRaises(DTDForbidden):
next(serializers.deserialize('xml', xml))
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
| bsd-3-clause |
shreyas111/Multimedia_CS523_Project1 | inception5h.py | 5 | 6371 | ########################################################################
#
# The Inception Model 5h for TensorFlow.
#
# This variant of the Inception model is easier to use for DeepDream
# and other imaging techniques. This is because it allows the input
# image to be any size, and the optimized images are also prettier.
#
# It is unclear which Inception model this implements because the
# Google developers have (as usual) neglected to document it.
# It is dubbed the 5h-model because that is the name of the zip-file,
# but it is apparently simpler than the v.3 model.
#
# See the Python Notebook for Tutorial #14 for an example usage.
#
# Implemented in Python 3.5 with TensorFlow v0.11.0rc0
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2016 by Magnus Erik Hvass Pedersen
#
########################################################################
import numpy as np
import tensorflow as tf
import download
import os
########################################################################
# Various directories and file-names.
# Internet URL for the tar-file with the Inception model.
# Note that this might change in the future and will need to be updated.
data_url = "http://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip"
# Directory to store the downloaded data.
data_dir = "inception/5h/"
# File containing the TensorFlow graph definition. (Downloaded)
path_graph_def = "tensorflow_inception_graph.pb"
########################################################################
def maybe_download():
"""
Download the Inception model from the internet if it does not already
exist in the data_dir. The file is about 50 MB.
"""
print("Downloading Inception 5h Model ...")
download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
########################################################################
class Inception5h:
"""
The Inception model is a Deep Neural Network which has already been
trained for classifying images into 1000 different categories.
When you create a new instance of this class, the Inception model
will be loaded and can be used immediately without training.
"""
# Name of the tensor for feeding the input image.
tensor_name_input_image = "input:0"
# Names for some of the commonly used layers in the Inception model.
layer_names = ['conv2d0', 'conv2d1', 'conv2d2',
'mixed3a', 'mixed3b',
'mixed4a', 'mixed4b', 'mixed4c', 'mixed4d', 'mixed4e',
'mixed5a', 'mixed5b']
def __init__(self):
# Now load the Inception model from file. The way TensorFlow
# does this is confusing and requires several steps.
# Create a new TensorFlow computational graph.
self.graph = tf.Graph()
# Set the new graph as the default.
with self.graph.as_default():
# TensorFlow graphs are saved to disk as so-called Protocol Buffers
# aka. proto-bufs which is a file-format that works on multiple
# platforms. In this case it is saved as a binary file.
# Open the graph-def file for binary reading.
path = os.path.join(data_dir, path_graph_def)
with tf.gfile.FastGFile(path, 'rb') as file:
# The graph-def is a saved copy of a TensorFlow graph.
# First we need to create an empty graph-def.
graph_def = tf.GraphDef()
# Then we load the proto-buf file into the graph-def.
graph_def.ParseFromString(file.read())
# Finally we import the graph-def to the default TensorFlow graph.
tf.import_graph_def(graph_def, name='')
# Now self.graph holds the Inception model from the proto-buf file.
# Get a reference to the tensor for inputting images to the graph.
self.input = self.graph.get_tensor_by_name(self.tensor_name_input_image)
# Get references to the tensors for the commonly used layers.
self.layer_tensors = [self.graph.get_tensor_by_name(name + ":0") for name in self.layer_names]
def create_feed_dict(self, image=None):
"""
Create and return a feed-dict with an image.
:param image:
The input image is a 3-dim array which is already decoded.
The pixels MUST be values between 0 and 255 (float or int).
:return:
Dict for feeding to the Inception graph in TensorFlow.
"""
# Expand 3-dim array to 4-dim by prepending an 'empty' dimension.
# This is because we are only feeding a single image, but the
# Inception model was built to take multiple images as input.
image = np.expand_dims(image, axis=0)
# Image is passed in as a 3-dim array of raw pixel-values.
feed_dict = {self.tensor_name_input_image: image}
return feed_dict
def get_gradient(self, tensor):
"""
Get the gradient of the given tensor with respect to
the input image. This allows us to modify the input
image so as to maximize the given tensor.
For use in e.g. DeepDream and Visual Analysis.
:param tensor:
The tensor whose value we want to maximize
by changing the input image.
:return:
Gradient for the tensor with regard to the input image.
"""
# Set the graph as default so we can add operations to it.
with self.graph.as_default():
# Square the tensor-values.
# You can try and remove this to see the effect.
tensor = tf.square(tensor)
# Average the tensor so we get a single scalar value.
tensor_mean = tf.reduce_mean(tensor)
# Use TensorFlow to automatically create a mathematical
# formula for the gradient using the chain-rule of
# differentiation.
gradient = tf.gradients(tensor_mean, self.input)[0]
return gradient
########################################################################
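# Illustrative sketch, not part of the original file: typical use of the class above with
# the TensorFlow 1.x Session API. It assumes the model zip has already been fetched via
# maybe_download() and that `image` is an H x W x 3 array of pixel values in 0-255; the
# helper name is hypothetical.
def _demo_layer_gradient(image):
    model = Inception5h()
    layer = model.layer_tensors[5]                 # the 'mixed4a' layer
    gradient = model.get_gradient(layer)
    with tf.Session(graph=model.graph) as session:
        feed_dict = model.create_feed_dict(image=image)
        grad_value = session.run(gradient, feed_dict=feed_dict)
    return grad_value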
| mit |
aaronzirbes/ansible | test/units/executor/test_task_executor.py | 33 | 9880 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.task_executor import TaskExecutor
from ansible.playbook.play_context import PlayContext
from ansible.plugins import action_loader, lookup_loader
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_task_executor_init(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
def test_task_executor_run(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task._role._role_path = '/path/to/role/foo'
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
te._get_loop_items = MagicMock(return_value=None)
te._execute = MagicMock(return_value=dict())
res = te.run()
te._get_loop_items = MagicMock(return_value=[])
res = te.run()
te._get_loop_items = MagicMock(return_value=['a','b','c'])
te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
res = te.run()
te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
res = te.run()
self.assertIn("failed", res)
def test_task_executor_get_loop_items(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.loop = 'items'
mock_task.loop_args = ['a', 'b', 'c']
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_shared_loader.lookup_loader = lookup_loader
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
items = te._get_loop_items()
self.assertEqual(items, ['a', 'b', 'c'])
def test_task_executor_run_loop(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _copy():
new_item = MagicMock()
return new_item
mock_task = MagicMock()
mock_task.copy.side_effect = _copy
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
def _execute(variables):
return dict(item=variables.get('item'))
te._squash_items = MagicMock(return_value=items)
te._execute = MagicMock(side_effect=_execute)
res = te._run_loop(items)
self.assertEqual(len(res), 3)
def test_task_executor_squash_items(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _evaluate_conditional(templar, variables):
item = variables.get('item')
if item == 'b':
return False
return True
mock_task = MagicMock()
mock_task.evaluate_conditional.side_effect = _evaluate_conditional
mock_play_context = MagicMock()
mock_shared_loader = None
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
mock_task.action = 'foo'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
mock_task.action = 'yum'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, ['a,c'])
def test_task_executor_execute(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.args = dict()
mock_task.retries = 0
mock_task.delay = -1
mock_task.register = 'foo'
mock_task.until = None
mock_task.changed_when = None
mock_task.failed_when = None
mock_task.post_validate.return_value = None
mock_play_context = MagicMock()
mock_play_context.post_validate.return_value = None
mock_play_context.update_vars.return_value = None
mock_connection = MagicMock()
mock_connection.set_host_overrides.return_value = None
mock_connection._connect.return_value = None
mock_action = MagicMock()
shared_loader = None
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
)
te._get_connection = MagicMock(return_value=mock_connection)
te._get_action_handler = MagicMock(return_value=mock_action)
mock_action.run.return_value = dict(ansible_facts=dict())
res = te._execute()
mock_task.changed_when = "1 == 1"
res = te._execute()
mock_task.changed_when = None
mock_task.failed_when = "1 == 1"
res = te._execute()
mock_task.failed_when = None
mock_task.evaluate_conditional.return_value = False
res = te._execute()
mock_task.evaluate_conditional.return_value = True
mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
mock_task.action = 'include'
res = te._execute()
def test_task_executor_poll_async_result(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.async = 3
mock_task.poll = 1
mock_play_context = MagicMock()
mock_connection = MagicMock()
mock_action = MagicMock()
shared_loader = None
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
)
te._connection = MagicMock()
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(stdout='')
return mock_action
# testing with some bad values in the result passed to poll async,
# and with a bad value returned from the mock action
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(), templar=mock_templar)
self.assertIn('failed', res)
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertIn('failed', res)
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(finished=1)
return mock_action
# now testing with good values
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertEqual(res, dict(finished=1))
| gpl-3.0 |
eyalfa/spark | examples/src/main/python/mllib/random_forest_classification_example.py | 106 | 2572 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Random Forest Classification Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonRandomForestClassificationExample")
# $example on$
# Load and parse the data file into an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a RandomForest model.
# Empty categoricalFeaturesInfo indicates all features are continuous.
# Note: Use larger numTrees in practice.
# Setting featureSubsetStrategy="auto" lets the algorithm choose.
model = RandomForest.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
numTrees=3, featureSubsetStrategy="auto",
impurity='gini', maxDepth=4, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification forest model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myRandomForestClassificationModel")
sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestClassificationModel")
# $example off$
| apache-2.0 |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/darwin/twisted/test/test_logfile.py | 19 | 6844 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
# system imports
import os, shutil, time
# twisted imports
from twisted.python import logfile
class LogFileTestCase(unittest.TestCase):
"""Test the rotating log file."""
def setUp(self):
self.dir = self.mktemp()
os.makedirs(self.dir)
self.name = "test.log"
self.path = os.path.join(self.dir, self.name)
def tearDown(self):
shutil.rmtree(self.dir)
pass
def testWriting(self):
log = logfile.LogFile(self.name, self.dir)
log.write("123")
log.write("456")
log.flush()
log.write("7890")
log.close()
f = open(self.path, "r")
self.assertEquals(f.read(), "1234567890")
f.close()
def testRotation(self):
# this logfile should rotate every 10 bytes
log = logfile.LogFile(self.name, self.dir, rotateLength=10)
# test automatic rotation
log.write("123")
log.write("4567890")
log.write("1" * 11)
self.assert_(os.path.exists("%s.1" % self.path))
self.assert_(not os.path.exists("%s.2" % self.path))
log.write('')
self.assert_(os.path.exists("%s.1" % self.path))
self.assert_(os.path.exists("%s.2" % self.path))
self.assert_(not os.path.exists("%s.3" % self.path))
log.write("3")
self.assert_(not os.path.exists("%s.3" % self.path))
# test manual rotation
log.rotate()
self.assert_(os.path.exists("%s.3" % self.path))
self.assert_(not os.path.exists("%s.4" % self.path))
log.close()
self.assertEquals(log.listLogs(), [1, 2, 3])
def testAppend(self):
log = logfile.LogFile(self.name, self.dir)
log.write("0123456789")
log.close()
log = logfile.LogFile(self.name, self.dir)
self.assertEquals(log.size, 10)
self.assertEquals(log._file.tell(), log.size)
log.write("abc")
self.assertEquals(log.size, 13)
self.assertEquals(log._file.tell(), log.size)
f = log._file
f.seek(0, 0)
self.assertEquals(f.read(), "0123456789abc")
log.close()
def testLogReader(self):
log = logfile.LogFile(self.name, self.dir)
log.write("abc\n")
log.write("def\n")
log.rotate()
log.write("ghi\n")
log.flush()
# check reading logs
self.assertEquals(log.listLogs(), [1])
reader = log.getCurrentLog()
reader._file.seek(0)
self.assertEquals(reader.readLines(), ["ghi\n"])
self.assertEquals(reader.readLines(), [])
reader.close()
reader = log.getLog(1)
self.assertEquals(reader.readLines(), ["abc\n", "def\n"])
self.assertEquals(reader.readLines(), [])
reader.close()
# check getting illegal log readers
self.assertRaises(ValueError, log.getLog, 2)
self.assertRaises(TypeError, log.getLog, "1")
# check that log numbers are higher for older logs
log.rotate()
self.assertEquals(log.listLogs(), [1, 2])
reader = log.getLog(1)
reader._file.seek(0)
self.assertEquals(reader.readLines(), ["ghi\n"])
self.assertEquals(reader.readLines(), [])
reader.close()
reader = log.getLog(2)
self.assertEquals(reader.readLines(), ["abc\n", "def\n"])
self.assertEquals(reader.readLines(), [])
reader.close()
def testModePreservation(self):
"logfile: check rotated files have same permissions as original."
if not hasattr(os, "chmod"): return
f = open(self.path, "w").close()
os.chmod(self.path, 0707)
mode = os.stat(self.path)[0]
log = logfile.LogFile(self.name, self.dir)
log.write("abc")
log.rotate()
self.assertEquals(mode, os.stat(self.path)[0])
def testNoPermission(self):
"logfile: check it keeps working when permission on dir changes."
log = logfile.LogFile(self.name, self.dir)
log.write("abc")
# change permissions so rotation would fail
os.chmod(self.dir, 444)
# if this succeeds, chmod doesn't restrict us, so we can't
# do the test
try:
f = open(os.path.join(self.dir,"xxx"), "w")
except (OSError, IOError):
pass
else:
f.close()
return
log.rotate() # this should not fail
log.write("def")
log.flush()
f = log._file
self.assertEquals(f.tell(), 6)
f.seek(0, 0)
self.assertEquals(f.read(), "abcdef")
log.close()
# reset permission so tearDown won't fail
os.chmod(self.dir, 0777)
class RiggedDailyLogFile(logfile.DailyLogFile):
_clock = 0.0
def _openFile(self):
logfile.DailyLogFile._openFile(self)
# rig the date to match _clock, not mtime
self.lastDate = self.toDate()
def toDate(self, *args):
if args:
return time.gmtime(*args)[:3]
return time.gmtime(self._clock)[:3]
class DailyLogFileTestCase(unittest.TestCase):
"""Test the rotating log file."""
def setUp(self):
self.dir = self.mktemp()
os.makedirs(self.dir)
self.name = "testdaily.log"
self.path = os.path.join(self.dir, self.name)
def tearDown(self):
shutil.rmtree(self.dir)
pass
def testWriting(self):
log = RiggedDailyLogFile(self.name, self.dir)
log.write("123")
log.write("456")
log.flush()
log.write("7890")
log.close()
f = open(self.path, "r")
self.assertEquals(f.read(), "1234567890")
f.close()
def testRotation(self):
# this logfile should rotate every 10 bytes
log = RiggedDailyLogFile(self.name, self.dir)
days = [(self.path + '.' + log.suffix(day * 86400)) for day in range(3)]
# test automatic rotation
log._clock = 0.0 # 1970/01/01 00:00.00
log.write("123")
log._clock = 43200 # 1970/01/01 12:00.00
log.write("4567890")
log._clock = 86400 # 1970/01/02 00:00.00
log.write("1" * 11)
self.assert_(os.path.exists(days[0]))
self.assert_(not os.path.exists(days[1]))
log._clock = 172800 # 1970/01/03 00:00.00
log.write('')
self.assert_(os.path.exists(days[0]))
self.assert_(os.path.exists(days[1]))
self.assert_(not os.path.exists(days[2]))
log._clock = 259199 # 1970/01/03 23:59.59
log.write("3")
self.assert_(not os.path.exists(days[2]))
| gpl-2.0 |
kevclarx/ansible | lib/ansible/module_utils/facts.py | 5 | 171157 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import shlex
import errno
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.six import PY3, iteritems
from ansible.module_utils.six.moves import configparser, StringIO, reduce
from ansible.module_utils._text import to_native, to_text
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
# Check if we have SSLContext support
from ssl import create_default_context, SSLContext
del create_default_context
del SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT=None
DEFAULT_GATHER_TIMEOUT = 10
class TimeoutError(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
local_seconds = seconds # Make local var as we modify this every time it's invoked
if local_seconds is None:
local_seconds = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(local_seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
# If we were called as @timeout, then the first parameter will be the
# function we are to wrap instead of the number of seconds. Detect this
# and correct it by setting seconds to our sentinel value and return the
# inner decorator function manually wrapped around the function
if callable(seconds):
func = seconds
seconds = None
return decorator(func)
# If we were called as @timeout([...]) then python itself will take
# care of wrapping the inner decorator around the function
return decorator
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/tools/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg5' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/bin/swupd', 'name' : 'swupd' },
{ 'path' : '/usr/sbin/sorcery', 'name' : 'sorcery' },
]
def __init__(self, module, load_on_init=True, cached_facts=None):
self.module = module
if not cached_facts:
self.facts = {}
else:
self.facts = cached_facts
### TODO: Eventually, these should all get moved to populate(). But
# some of the values are currently being used by other subclasses (for
# instance, os_family and distribution). Have to sort out what to do
# about those first.
if load_on_init:
self.get_platform_facts()
self.facts.update(Distribution(module).populate())
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_apparmor_facts()
self.get_caps_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
self.get_python_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
getconf_bin = self.module.get_bin_path('getconf')
if getconf_bin:
rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
data = out.splitlines()
self.facts['architecture'] = data[0]
else:
bootinfo_bin = self.module.get_bin_path('bootinfo')
rc, out, err = self.module.run_command([bootinfo_bin, '-p'])
data = out.splitlines()
self.facts['architecture'] = data[0]
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
if machine_id:
machine_id = machine_id.splitlines()[0]
self.facts["machine_id"] = machine_id
def get_local_facts(self):
fact_path = self.module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
try:
rc, out, err = self.module.run_command(fn)
except UnicodeError:
fact = 'error loading fact - output of running %s was not utf-8' % fn
local[fact_base] = fact
self.facts['local'] = local
return
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError:
# load raw ini
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except configparser.Error:
fact = "error loading fact - please check content"
else:
fact = {}
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
# list of directories to check for ssh keys
# used in the order listed here, the first one with keys is used
keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
for keydir in keydirs:
for type_ in keytypes:
factname = 'ssh_host_key_%s_public' % type_
if factname in self.facts:
# a previous keydir was already successful, stop looking
# for keys
return
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
else:
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.isfile(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
def get_service_mgr_facts(self):
#TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
# also other OSs other than linux might need to check across several possible candidates
# Mapping of proc_1 values to more useful names
proc_1_map = {
'procd': 'openwrt_init',
'runit-init': 'runit',
'svscan': 'svc',
'openrc-init': 'openrc',
}
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
# If the output of the command starts with what looks like a PID, then the 'ps' command
# probably didn't work the way we wanted, probably because it's busybox
if re.match(r' *[0-9]+ ', proc_1):
proc_1 = None
# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None
if proc_1 is not None:
proc_1 = os.path.basename(proc_1)
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
# many systems return init, so this cannot be trusted; if it ends in 'sh' it is probably a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
# Lookup proc_1 value in map and use proc_1 value itself if no match
self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1)
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd'
else:
self.facts['service_mgr'] = 'systemstarter'
elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
#FIXME: we might want to break out to individual BSDs or 'rc'
self.facts['service_mgr'] = 'bsdinit'
elif self.facts['system'] == 'AIX':
self.facts['service_mgr'] = 'src'
elif self.facts['system'] == 'SunOS':
self.facts['service_mgr'] = 'smf'
elif self.facts['distribution'] == 'OpenWrt':
self.facts['service_mgr'] = 'openwrt_init'
elif self.facts['system'] == 'Linux':
if self.is_systemd_managed():
self.facts['service_mgr'] = 'systemd'
elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
self.facts['service_mgr'] = 'upstart'
elif os.path.exists('/sbin/openrc'):
self.facts['service_mgr'] = 'openrc'
elif os.path.exists('/etc/init.d/'):
self.facts['service_mgr'] = 'sysvinit'
if not self.facts.get('service_mgr', False):
# if we cannot detect, fallback to generic 'service'
self.facts['service_mgr'] = 'service'
def get_lsb_facts(self):
lsb_path = self.module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = self.module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
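# 'lsb_release -a' output looks roughly like (illustrative):
#   Distributor ID: Ubuntu
#   Description:    Ubuntu 16.04 LTS
#   Release:        16.04
#   Codename:       xenial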
if rc == 0:
self.facts['lsb'] = {}
for line in out.splitlines():
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
for line in get_file_lines('/etc/lsb-release'):
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except (AttributeError,OSError):
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except (AttributeError,OSError):
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except (AttributeError,OSError):
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except (AttributeError,OSError):
self.facts['selinux']['type'] = 'unknown'
def get_apparmor_facts(self):
self.facts['apparmor'] = {}
if os.path.exists('/sys/kernel/security/apparmor'):
self.facts['apparmor']['status'] = 'enabled'
else:
self.facts['apparmor']['status'] = 'disabled'
def get_caps_facts(self):
capsh_path = self.module.get_bin_path('capsh')
if capsh_path:
rc, out, err = self.module.run_command([capsh_path, "--print"], errors='surrogate_then_replace')
enforced_caps = []
enforced = 'NA'
for line in out.splitlines():
if len(line) < 1:
continue
if line.startswith('Current:'):
if line.split(':')[1].strip() == '=ep':
enforced = 'False'
else:
enforced = 'True'
enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
self.facts['system_capabilities_enforced'] = enforced
self.facts['system_capabilities'] = enforced_caps
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['weekday_number'] = now.strftime('%w')
self.facts['date_time']['weeknumber'] = now.strftime('%W')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
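# strftime('%s') is a platform extension (glibc); on systems without it the result may be
# empty or the literal '%s', hence the fallback to time.time() below.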
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
def is_systemd_managed(self):
# tools must be installed
if self.module.get_bin_path('systemctl'):
# this should show whether systemd is the boot init system, in case inspecting PID 1 failed to identify it as systemd
# these mirror systemd's own sd_booted() test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
self.facts['real_user_id'] = os.getuid()
self.facts['effective_user_id'] = os.geteuid()
self.facts['real_group_id'] = os.getgid()
self.facts['effective_group_id'] = os.getegid()
def get_env_facts(self):
self.facts['env'] = {}
for k,v in iteritems(os.environ):
self.facts['env'][k] = v
def get_dns_facts(self):
self.facts['dns'] = {}
for line in get_file_content('/etc/resolv.conf', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
if 'nameservers' not in self.facts['dns']:
self.facts['dns']['nameservers'] = []
for nameserver in tokens[1:]:
self.facts['dns']['nameservers'].append(nameserver)
elif tokens[0] == 'domain':
if len(tokens) > 1:
self.facts['dns']['domain'] = tokens[1]
elif tokens[0] == 'search':
self.facts['dns']['search'] = []
for suffix in tokens[1:]:
self.facts['dns']['search'].append(suffix)
elif tokens[0] == 'sortlist':
self.facts['dns']['sortlist'] = []
for address in tokens[1:]:
self.facts['dns']['sortlist'].append(address)
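# Example (illustrative): a line 'options ndots:2 rotate' is parsed below into
# {'ndots': '2', 'rotate': True}.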
elif tokens[0] == 'options':
self.facts['dns']['options'] = {}
if len(tokens) > 1:
for option in tokens[1:]:
option_tokens = option.split(':', 1)
if len(option_tokens) == 0:
continue
val = len(option_tokens) == 2 and option_tokens[1] or True
self.facts['dns']['options'][option_tokens[0]] = val
def _get_mount_size_facts(self, mountpoint):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(mountpoint)
size_total = statvfs_result.f_frsize * statvfs_result.f_blocks
size_available = statvfs_result.f_frsize * (statvfs_result.f_bavail)
except OSError:
pass
return size_total, size_available
def get_python_facts(self):
self.facts['python'] = {
'version': {
'major': sys.version_info[0],
'minor': sys.version_info[1],
'micro': sys.version_info[2],
'releaselevel': sys.version_info[3],
'serial': sys.version_info[4]
},
'version_info': list(sys.version_info),
'executable': sys.executable,
'has_sslcontext': HAS_SSLCONTEXT
}
try:
self.facts['python']['type'] = sys.subversion[0]
except AttributeError:
try:
self.facts['python']['type'] = sys.implementation.name
except AttributeError:
self.facts['python']['type'] = None
class Distribution(object):
"""
This class fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SuSE'},
{'path': '/etc/SuSE-release', 'name': 'SuSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/etc/os-release', 'name': 'NA'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
'ClearLinux': 'Clear Linux',
'SMGL': 'Source Mage GNU/Linux',
}
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', Virtuozzo = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo',
Funtoo = 'Gentoo', Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
)
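# Example: a detected distribution of 'CentOS' maps to os_family 'RedHat'; a distribution
# not listed here simply keeps its own name as its os_family (see get_distribution_facts).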
def __init__(self, module):
self.system = platform.system()
self.facts = {}
self.module = module
def populate(self):
self.get_distribution_facts()
return self.facts
def get_distribution_facts(self):
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
self.facts['distribution'] = self.system
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
self.facts['distribution'] = self.system
if self.system in systems_implemented:
cleanedname = self.system.replace('-','')
distfunc = getattr(self, 'get_distribution_'+cleanedname)
distfunc()
elif self.system == 'Linux':
# try to find out which linux distribution this is
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
if not os.path.exists(path):
continue
# if allowempty is set, we only check for file existence but not content
if 'allowempty' in ddict and ddict['allowempty']:
self.facts['distribution'] = name
break
if os.path.getsize(path) == 0:
continue
data = get_file_content(path)
if name in self.SEARCH_STRING:
# look for the distribution string in the data
# only the distribution name is set here; the version is assumed to be correct from platform.dist()
if self.SEARCH_STRING[name] in data:
# this sets distribution=RedHat if 'Red Hat' shows up in data
self.facts['distribution'] = name
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
self.facts['distribution'] = data.split()[0]
break
else:
# call a dedicated function for parsing the file content
try:
distfunc = getattr(self, 'get_distribution_' + name)
parsed = distfunc(name, data, path)
if parsed is None or parsed:
# distfunc returns False if parsing failed
# break only if parsing was successful
# otherwise continue with other distributions
break
except AttributeError:
# this should never happen, but if it does, fail quietly rather than with a traceback
pass
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
self.facts['os_family'] = self.facts['distribution']
distro = self.facts['distribution'].replace(' ', '_')
if distro in self.OS_FAMILY:
self.facts['os_family'] = self.OS_FAMILY[distro]
def get_distribution_AIX(self):
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
def get_distribution_HPUX(self):
rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
def get_distribution_Darwin(self):
self.facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
def get_distribution_FreeBSD(self):
self.facts['distribution_release'] = platform.release()
data = re.search('(\d+)\.(\d+)-RELEASE.*', self.facts['distribution_release'])
if data:
self.facts['distribution_major_version'] = data.group(1)
self.facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
def get_distribution_OpenBSD(self):
self.facts['distribution_version'] = platform.release()
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_release'] = match.groups()[0]
else:
self.facts['distribution_release'] = 'release'
def get_distribution_DragonFly(self):
pass
def get_distribution_NetBSD(self):
self.facts['distribution_major_version'] = self.facts['distribution_release'].split('.')[0]
def get_distribution_Slackware(self, name, data, path):
if 'Slackware' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.findall('\w+[.]\w+', data)
if version:
self.facts['distribution_version'] = version[0]
def get_distribution_Amazon(self, name, data, path):
if 'Amazon' not in data:
return False # TODO: remove
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
def get_distribution_OpenWrt(self, name, data, path):
if 'OpenWrt' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
def get_distribution_Alpine(self, name, data, path):
self.facts['distribution'] = 'Alpine'
self.facts['distribution_version'] = data
def get_distribution_SMGL(self):
self.facts['distribution'] = 'Source Mage GNU/Linux'
def get_distribution_SunOS(self):
data = get_file_content('/etc/release').splitlines()[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
return
uname_v = get_uname_version(self.module)
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_v is not None and 'NexentaOS_' in uname_v:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_v is not None:
self.facts['distribution_version'] = uname_v.splitlines()[0].strip()
return
return False # TODO: remove if tested without this
def get_distribution_SuSE(self, name, data, path):
if 'suse' not in data.lower():
return False # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
# example patterns: 13.04, 13.0, 13
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
# SLES does not have fancy release names
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
self.facts['distribution_release'] = release
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
self.facts['distribution'] = "SLES"
elif "Desktop" in data:
self.facts['distribution'] = "SLED"
for line in lines:
release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES does not have fancy release names
if release:
self.facts['distribution_release'] = release.group(1)
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
def get_distribution_Debian(self, name, data, path):
if 'Debian' in data or 'Raspbian' in data:
self.facts['distribution'] = 'Debian'
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
# Last resort: try to find the release from tzdata, as either lsb is missing or this is a very old Debian
if self.facts['distribution_release'] == 'NA' and 'Debian' in data:
dpkg_cmd = self.module.get_bin_path('dpkg')
if dpkg_cmd:
cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.facts['distribution_release'] = out.strip()
elif 'Ubuntu' in data:
self.facts['distribution'] = 'Ubuntu'
# nothing else to do, Ubuntu gets correct info from python functions
else:
return False
def get_distribution_Mandriva(self, name, data, path):
if 'Mandriva' in data:
self.facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
else:
return False
def get_distribution_NA(self, name, data, path):
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and self.facts['distribution'] == 'NA':
self.facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and self.facts['distribution_version'] == 'NA':
self.facts['distribution_version'] = version.group(1).strip('"')
def get_distribution_Coreos(self, name, data, path):
if self.facts['distribution'].lower() == 'coreos':
if not data:
# include fix from #15230, #15228
return
release = re.search("^GROUP=(.*)", data)
if release:
self.facts['distribution_release'] = release.group(1).strip('"')
else:
return False # TODO: remove if tested without this
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
# When Hardware is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Hardware:
return super(Hardware, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Hardware):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
def get_sysctl(self, prefixes):
sysctl_cmd = self.module.get_bin_path('sysctl')
cmd = [sysctl_cmd]
cmd.extend(prefixes)
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if not line:
continue
(key, value) = re.split('\s?=\s?|: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as top-level facts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
# regex used against findmnt output to detect bind mounts
BIND_MOUNT_RE = re.compile(r'.*\]')
# regex used against mtab content to find entries that are bind mounts
MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
self.get_lvm_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
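# Worked example (illustrative numbers): with memtotal=2000, memfree=500, cached=300 and
# buffers=100 (all MiB), real:used=1500, nocache:free=900 and nocache:used=1100.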
self.facts['memory_mb'] = {
'real' : {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache' : {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap' : {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in data:
xen_paravirt = True
# 'model name' is present on Intel arch; 'Processor' (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g. ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
if sockets:
self.facts['processor_count'] = len(sockets)
else:
self.facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values and socket_values[0]:
self.facts['processor_cores'] = socket_values[0]
else:
self.facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
self.facts['processor_threads_per_core'] = core_values[0] // self.facts['processor_cores']
else:
self.facts['processor_threads_per_core'] = 1 // self.facts['processor_cores']
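# Worked example (illustrative): 2 sockets, 8 'cpu cores' per socket and 16 'siblings'
# give processor_count=2, processor_cores=8, processor_threads_per_core=2 and
# processor_vcpus=32.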
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.splitlines() if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to respond than typical timeouts allow
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
def _lsblk_uuid(self):
uuids = {}
lsblk_path = self.module.get_bin_path("lsblk")
if not lsblk_path:
return uuids
rc, out, err = self._run_lsblk(lsblk_path)
if rc != 0:
return uuids
# each line will be in format:
# <devicename><some whitespace><uuid>
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
for lsblk_line in out.splitlines():
if not lsblk_line:
continue
line = lsblk_line.strip()
fields = line.rsplit(None, 1)
if len(fields) < 2:
continue
device_name, uuid = fields[0].strip(), fields[1].strip()
if device_name in uuids:
continue
uuids[device_name] = uuid
return uuids
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
return rc, out, err
def _find_bind_mounts(self):
bind_mounts = set()
findmnt_path = self.module.get_bin_path("findmnt")
if not findmnt_path:
return bind_mounts
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
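# A findmnt line for a bind mount looks roughly like (illustrative):
#   /mnt/data  /dev/sda1[/srv/data]  ext4  rw,relatime
# the '[...]' suffix on the SOURCE column is what BIND_MOUNT_RE keys on.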
for line in out.splitlines():
fields = line.split()
# fields[0] is the TARGET, fields[1] is the SOURCE
if len(fields) < 2:
continue
# bind mounts will have a [/directory_name] in the SOURCE column
if self.BIND_MOUNT_RE.match(fields[1]):
bind_mounts.add(fields[0])
return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
bind_mounts = self._find_bind_mounts()
uuids = self._lsblk_uuid()
mtab_entries = self._mtab_entries()
mounts = []
for fields in mtab_entries:
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
if not device.startswith('/') and ':/' not in device:
continue
if fstype == 'none':
continue
size_total, size_available = self._get_mount_size_facts(mount)
if mount in bind_mounts:
# only append ',bind' if it is not already in the options; a plain /etc/mtab may already list it
if not self.MTAB_BIND_MOUNT_RE.match(options):
options += ",bind"
mount_info = {'mount': mount,
'device': device,
'fstype': fstype,
'options': options,
# statvfs data
'size_total': size_total,
'size_available': size_available,
'uuid': uuids.get(device, 'N/A')}
mounts.append(mount_info)
self.facts['mounts'] = mounts
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
def get_device_facts(self):
self.facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
devs_wwn = {}
try:
devs_by_id = os.listdir("/dev/disk/by-id")
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith("wwn-"):
try:
wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
sg_inq = self.module.get_bin_path('sg_inq')
if sg_inq:
device = "/dev/%s" % (block)
rc, drivedata, err = self.module.run_command([sg_inq, device])
if rc == 0:
serial = re.search("Unit serial number:\s+(\w+)", drivedata)
if serial:
d['serial'] = serial.group(1)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
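# e.g. (illustrative) a sysdir of '/sys/devices/pci0000:00/0000:00:1f.2/ata1/.../block/sda'
# yields pciid '0000:00:1f.2', which is then looked up in the 'lspci -D' output.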
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
self.facts['devices'][diskname] = d
def get_uptime_facts(self):
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
def _find_mapper_device_name(self, dm_device):
dm_prefix = '/dev/dm-'
mapper_device = dm_device
if dm_device.startswith(dm_prefix):
dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc == 0:
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g'
vgs_path = self.module.get_bin_path('vgs')
#vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs={}
if vgs_path:
rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.split()
vgs[items[0]] = {'size_g':items[-2],
'free_g':items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
#lvs fields:
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.split()
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
pvs_path = self.module.get_bin_path('pvs')
#pvs fields: PV VG #Fmt #Attr PSize PFree
pvs = {}
if pvs_path:
rc, pv_lines, err = self.module.run_command( '%s %s' % (pvs_path, lvm_util_options))
for pv_line in pv_lines.splitlines():
items = pv_line.split()
pvs[self._find_mapper_device_name(items[0])] = {
'size_g': items[4],
'free_g': items[5],
'vg': items[1]}
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that are available from *swap -s*.
"""
platform = 'SunOS'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.splitlines():
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
# virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
for line in out.splitlines():
if 'Memory size' in line:
self.facts['memtotal_mb'] = int(line.split()[2])
rc, out, err = self.module.run_command("/usr/sbin/swap -s")
allocated = int(out.split()[1][:-1])
reserved = int(out.split()[5][:-1])
used = int(out.split()[8][:-1])
free = int(out.split()[10][:-1])
self.facts['swapfree_mb'] = free // 1024
self.facts['swaptotal_mb'] = (free + used) // 1024
self.facts['swap_allocated_mb'] = allocated // 1024
self.facts['swap_reserved_mb'] = reserved // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.splitlines():
fields = line.split('\t')
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({
'mount': fields[1],
'device': fields[0],
'fstype' : fields[2],
'options': fields[3],
'time': fields[4],
'size_total': size_total,
'size_available': size_available
})
def get_dmi_facts(self):
prtdiag_path = self.module.get_bin_path("prtdiag")
rc, out, err = self.module.run_command(prtdiag_path)
# prtdiag can exit non-zero (rc == 1) while still producing usable output,
# so inspect the output rather than the return code.
if out:
system_conf = out.split('\n')[0]
found = re.search(r'(\w+\sEnterprise\s\w+)',system_conf)
if found:
self.facts['product_name'] = found.group(1)
def get_device_facts(self):
# Device facts are derived from sd error (sderr) kstats. This code does not use the
# full output, but rather queries for specific stats.
# Example output:
# sderr:0:sd0,err:Hard Errors 0
# sderr:0:sd0,err:Illegal Request 6
# sderr:0:sd0,err:Media Error 0
# sderr:0:sd0,err:Predictive Failure Analysis 0
# sderr:0:sd0,err:Product VBOX HARDDISK 9
# sderr:0:sd0,err:Revision 1.0
# sderr:0:sd0,err:Serial No VB0ad2ec4d-074a
# sderr:0:sd0,err:Size 53687091200
# sderr:0:sd0,err:Soft Errors 0
# sderr:0:sd0,err:Transport Errors 0
# sderr:0:sd0,err:Vendor ATA
self.facts['devices'] = {}
disk_stats = {
'Product': 'product',
'Revision': 'revision',
'Serial No': 'serial',
'Size': 'size',
'Vendor': 'vendor',
'Hard Errors': 'hard_errors',
'Soft Errors': 'soft_errors',
'Transport Errors': 'transport_errors',
'Media Error': 'media_errors',
'Predictive Failure Analysis': 'predictive_failure_analysis',
'Illegal Request': 'illegal_request',
}
cmd = ['/usr/bin/kstat', '-p']
for ds in disk_stats:
cmd.append('sderr:::%s' % ds)
d = {}
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return dict()
sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
for instance in sd_instances:
lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
for line in lines:
text, value = line.split('\t')
stat = text.split(':')[3]
if stat == 'Size':
d[disk_stats.get(stat)] = self.module.pretty_bytes(float(value))
else:
d[disk_stats.get(stat)] = value.rstrip()
diskname = 'sd' + instance
self.facts['devices'][diskname] = d
d = {}
def get_uptime_facts(self):
# On Solaris, unix:0:system_misc:snaptime is created shortly after the machine boots up
# and reports the time in seconds. This is much easier than using uptime, which would
# require parsing its human-readable output into a machine-readable format.
# Example output:
# unix:0:system_misc:snaptime 1175.410463590
rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:snaptime')
if rc != 0:
return
self.facts['uptime_seconds'] = int(float(out.split('\t')[1]))
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'OpenBSD'
def populate(self):
self.sysctl = self.get_sysctl(['hw'])
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
self.get_dmi_facts()
return self.facts
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ', line).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({
'mount': fields[1],
'device': fields[0],
'fstype' : fields[2],
'options': fields[3],
'size_total': size_total,
'size_available': size_available
})
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = self.module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = self.module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = { ord(u'k'): None, ord(u'm'): None, ord(u'g'): None}
data = to_text(out, errors='surrogate_or_strict').split()
self.facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
self.facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024
def get_processor_facts(self):
processor = []
for i in range(int(self.sysctl['hw.ncpu'])):
processor.append(self.sysctl['hw.model'])
self.facts['processor'] = processor
# The following is partly a lie because there is no reliable way to
# determine the number of physical CPUs in the system. We can only
# query the number of logical CPUs, which hides the number of cores.
# On amd64/i386 we could try to inspect the smt/core/package lines in
# dmesg, however even those have proven to be unreliable.
# So take a shortcut and report the logical number of processors in
# 'processor_count' and 'processor_cores' and leave it at that.
self.facts['processor_count'] = self.sysctl['hw.ncpu']
self.facts['processor_cores'] = self.sysctl['hw.ncpu']
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
def get_dmi_facts(self):
# We don't use dmidecode(1) here because:
# - it would add dependency on an external package
# - dmidecode(1) can only be run as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'hw.product': 'product_name',
'hw.version': 'product_version',
'hw.uuid': 'product_uuid',
'hw.serialno': 'product_serial',
'hw.vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
for line in dmesg_boot.splitlines():
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
for line in out.splitlines():
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = int(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
lines = out.splitlines()
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) // 1024
self.facts['swapfree_mb'] = int(data[3]) // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({
'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3],
'size_total': size_total,
'size_available': size_available
})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class DragonFlyHardware(FreeBSDHardware):
platform = 'DragonFly'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def populate(self):
self.sysctl = self.get_sysctl(['machdep'])
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
# 'model name' is present on Intel arch; 'Processor' (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({
'mount': fields[1],
'device': fields[0],
'fstype' : fields[2],
'options': fields[3],
'size_total': size_total,
'size_available': size_available
})
def get_dmi_facts(self):
# We don't use dmidecode(1) here because:
# - it would add dependency on an external package
# - dmidecode(1) can only be run as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'machdep.dmi.system-product': 'product_name',
'machdep.dmi.system-version': 'product_version',
'machdep.dmi.system-uuid': 'product_uuid',
'machdep.dmi.system-serial': 'product_serial',
'machdep.dmi.system-vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_vgs_facts()
self.get_mount_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swap info from lsps(1). 'lsps -s' output looks like:
        # Total Paging Space   Percent Used
        #       512MB               1%
        #
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = int(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
self.facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
self.facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
self.facts['product_name'] = data[1].strip()
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk0 active 546 0 00..00..00..00..00
hdisk1 active 546 113 00..00..00..21..92
realsyncvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk74 active 1999 6 00..00..00..00..06
testvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk105 active 999 838 200..39..199..200..200
hdisk106 active 999 599 200..00..00..199..200
"""
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s | %s %s -p" % (lsvg_path ,xargs_path,lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd,use_unsafe_shell=True)
if rc == 0 and out:
self.facts['vgs']= {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
self.facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path,m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)):
pv_info = { 'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
self.facts['vgs'][m.group(1)].append(pv_info)
def get_mount_facts(self):
self.facts['mounts'] = []
        # AIX has no mtab; the mount command (or equivalent API calls) is the
        # only source of this information.
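        # A (roughly) typical AIX 'mount' output looks like:
        #   node       mounted        mounted over    vfs       date        options
        # -------- ---------------  ---------------  ------ ------------ ---------------
        #            /dev/hd4        /                jfs2   Jul 17 08:04 rw,log=/dev/hd8
        #   nfssrv   /export/home    /home            nfs3   Jul 17 08:05 rw,bg,hard
        # i.e. the 'node' column is empty for local mounts, which is why the
        # field offsets below differ between the two branches.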
mount_path = self.module.get_bin_path('mount')
rc, mount_out, err = self.module.run_command(mount_path)
if mount_out:
for line in mount_out.split('\n'):
fields = line.split()
if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
if re.match('^/', fields[0]):
# normal mount
self.facts['mounts'].append({'mount': fields[1],
'device': fields[0],
'fstype' : fields[2],
'options': fields[6],
'time': '%s %s %s' % ( fields[3], fields[4], fields[5])})
else:
# nfs or cifs based mount
# in case of nfs if no mount options are provided on command line
# add into fields empty string...
if len(fields) < 8:
fields.append("")
self.facts['mounts'].append({'mount': fields[2],
'device': '%s:%s' % (fields[0], fields[1]),
'fstype' : fields[3],
'options': fields[7],
'time': '%s %s %s' % ( fields[4], fields[5], fields[6])})
class HPUX(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                # machinfo prints 'core' lines only on B.11.31 releases newer than 1204;
                # older releases fall through to the parsing directly below.
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
                    self.facts['processor_cores'] = int(data[0]) // 2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data // 1024 // 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) // 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
use_unsafe_shell=True)
if not err:
data = out
                        self.facts['memtotal_mb'] = int(data) // 256
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().splitlines():
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = self.module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ",use_unsafe_shell=True)
if rc == 0 and out:
self.facts['product_serial'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def populate(self):
self.sysctl = self.get_sysctl(['hw','machdep','kern'])
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = self.module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024
rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
class HurdHardware(LinuxHardware):
"""
GNU Hurd specific subclass of Hardware. Define memory and mount facts
based on procfs compatibility translator mimicking the interface of
the Linux kernel.
"""
platform = 'GNU'
def populate(self):
self.get_uptime_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
# When Network is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Network:
return super(Network, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Network):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
INTERFACE_TYPE = {
'1': 'ether',
'32': 'infiniband',
'512': 'ppp',
'772': 'loopback',
'65534': 'tunnel',
}
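    # The keys above are the numeric ARPHRD_* values exposed in
    # /sys/class/net/<device>/type, e.g. 1 (ARPHRD_ETHER) for ethernet and
    # 772 (ARPHRD_LOOPBACK) for the loopback device.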
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
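        # A successful lookup prints something roughly like:
        #   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.10
        # from which the 'dev', 'src' and 'via' fields are extracted below.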
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if (v == 'v6' and self.facts['os_family'] == 'RedHat' and
self.facts['distribution_version'].startswith('4.')):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.splitlines()[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
if os.path.exists(os.path.join(path, 'bonding_slave')):
interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
if os.path.exists(os.path.join(path,'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
if os.path.exists(os.path.join(path, 'speed')):
speed = get_file_content(os.path.join(path, 'speed'))
if speed is not None:
interfaces[device]['speed'] = int(speed)
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
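                # e.g. a flags value of 0x1103 (UP|BROADCAST|PROMISC|MULTICAST)
                # gives 0x1103 & 0x0100 == 0x0100, so promisc_mode becomes True.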
data = int(get_file_content(os.path.join(path, 'flags')),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.splitlines():
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
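                    # e.g. for 192.168.1.10/24: netmask_bin is 0xffffff00, so
                    # netmask is 255.255.255.0 and network is 192.168.1.0.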
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
if 'peer' == words[2]:
address = words[1]
_, prefix = words[3].split('/')
scope = words[5]
else:
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = self.module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
interfaces[device].update(self.get_ethtool_data(device))
        # replace ':' with '_' in interface names, since colons are awkward to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
def get_ethtool_data(self, device):
data = {}
ethtool_path = self.module.get_bin_path("ethtool")
if ethtool_path:
args = [ethtool_path, '-k', device]
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
features = {}
for line in stdout.strip().splitlines():
if not line or line.endswith(":"):
continue
key,value = line.split(": ")
if not value:
continue
features[key.strip().replace('-','_')] = value.strip()
data['features'] = features
args = [ethtool_path, '-T', device]
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
data['timestamping'] = [m.lower() for m in re.findall('SOF_TIMESTAMPING_(\w+)', stdout)]
data['hw_timestamp_filters'] = [m.lower() for m in re.findall('HWTSTAMP_FILTER_(\w+)', stdout)]
m = re.search('PTP Hardware Clock: (\d+)', stdout)
if m:
data['phc_index'] = int(m.groups()[0])
return data
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
"""
platform = 'Generic_BSD_Ifconfig'
def populate(self):
ifconfig_path = self.module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = self.module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.detect_type_media(interfaces)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def detect_type_media(self, interfaces):
for iface in interfaces:
if 'media' in interfaces[iface]:
if 'ether' in interfaces[iface]['media'].lower():
interfaces[iface]['type'] = 'ether'
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
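        # The relevant part of the output looks roughly like:
        #     gateway: 192.168.1.1
        #   interface: em0
        # and only the 'interface:' and 'gateway:' lines are parsed below.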
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
for line in out.splitlines():
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
elif words[0] == 'tunnel':
self.parse_tunnel_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
if 'LOOPBACK' in current_if['flags']:
current_if['type'] = 'loopback'
current_if['macaddress'] = 'unknown' # will be overwritten later
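        # Newer FreeBSD prints e.g. 'em0: flags=8843<...> metric 0 mtu 1500'
        # (metric at words[3], mtu at words[5]); older output omits the metric,
        # leaving the mtu at words[3].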
if len(words) >= 5 : # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
        # NetBSD shows aliases like this:
# lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
# inet 127.0.0.1 netmask 0xff000000
# inet alias 127.1.1.1 netmask 0xff000000
if words[1] == 'alias':
del words[1]
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
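            # e.g. a hex netmask of 'ffffff00' is rendered as 255.255.255.0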
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
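        # e.g. 192.168.1.10 with netmask 255.255.255.0 yields broadcast 192.168.1.255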
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_tunnel_line(self, words, current_if, ips):
current_if['type'] = 'tunnel'
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
if 'interface' not in defaults:
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo:
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0]:
defaults[item] = ifinfo[ip_type][0][item]
class HPUXNetwork(Network):
"""
    HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
"""
platform = 'HP-UX'
def populate(self):
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return self.facts
self.get_default_interfaces()
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
lines = out.splitlines()
for line in lines:
words = line.split()
for i in range(len(words) - 1):
if words[i][:3] == 'lan':
device = words[i]
interfaces[device] = { 'device': device }
address = words[i+3]
interfaces[device]['ipv4'] = { 'address': address }
network = words[i+2]
interfaces[device]['ipv4'] = { 'network': network,
'interface': device,
'address': address }
return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
"""
This is the DragonFly Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'DragonFly'
class AIXNetwork(GenericBsdIfconfigNetwork):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
netstat_path = self.module.get_bin_path('netstat')
rc, out, err = self.module.run_command([netstat_path, '-nr'])
interface = dict(v4 = {}, v6 = {})
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
uname_rc = None
uname_out = None
uname_err = None
uname_path = self.module.get_bin_path('uname')
if uname_path:
uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
                # Skip WPARs, where this does not work; 'uname -W' printing
                # zero means we are not inside a WPAR.
if not uname_rc and uname_out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = self.module.get_bin_path('entstat')
if entstat_path:
rc, out, err = self.module.run_command([entstat_path, current_if['device'] ])
if rc != 0:
break
for line in out.splitlines():
if not line:
                                    continue
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = self.module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ])
if rc != 0:
break
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
    # AIX 'ifconfig -a' does not report the MTU, so don't set current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
class NetBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the NetBSD Network Class.
It uses the GenericBsdIfconfigNetwork
"""
platform = 'NetBSD'
def parse_media_line(self, words, current_if, ips):
# example of line:
# $ ifconfig
# ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
# ec_capabilities=1<VLAN_MTU>
# ec_enabled=0
# address: 00:20:91:45:00:78
# media: Ethernet 10baseT full-duplex
# inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2]
if len(words) > 3:
current_if['media_options'] = words[3].split(',')
class SunOSNetwork(GenericBsdIfconfigNetwork):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = self.module.run_command([ifconfig_path, '-a'])
for line in out.splitlines():
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces:
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
if 'LOOPBACK' in flags:
current_if['type'] = 'loopback'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
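    # e.g. '0:3:ba:1:4f:c2' becomes '00:03:ba:01:4f:c2'.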
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
class HurdPfinetNetwork(Network):
"""
    This is a GNU Hurd specific subclass of Network. It uses fsysopts to
    get the IP address and supports only pfinet.
"""
platform = 'GNU'
_socket_dir = '/servers/socket/'
def populate(self):
fsysopts_path = self.module.get_bin_path('fsysopts')
if fsysopts_path is None:
return self.facts
socket_path = None
for l in ('inet', 'inet6'):
link = os.path.join(self._socket_dir, l)
if os.path.exists(link):
socket_path = link
break
if socket_path:
rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
self.facts['interfaces'] = []
for i in out.split():
if '=' in i and i.startswith('--'):
k,v = i.split('=',1)
# remove '--'
k = k[2:]
if k == 'interface':
# remove /dev/ from /dev/eth0
v = v[5:]
self.facts['interfaces'].append(v)
self.facts[v] = {
'active': True,
'device': v,
'ipv4': {},
'ipv6': [],
}
current_if = v
elif k == 'address':
self.facts[current_if]['ipv4']['address'] = v
elif k == 'netmask':
self.facts[current_if]['ipv4']['netmask'] = v
elif k == 'address6':
address,prefix = v.split('/')
self.facts[current_if]['ipv6'].append({
'address': address,
'prefix': prefix,
})
return self.facts
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
# When Virtual is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Virtual:
return super(Virtual, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Virtual):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
# lxc/docker
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
        # lxc does not always appear in cgroups anymore, but it sets the
        # 'container=lxc' environment variable; reading /proc/1/environ requires root privileges
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ'):
if re.search('container=lxc', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
self.facts['virtualization_type'] = systemd_container
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'OpenStack Nova':
self.facts['virtualization_type'] = 'openstack'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'OpenStack Foundation':
self.facts['virtualization_type'] = 'openstack'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.splitlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
if os.path.isdir('/rhev/'):
# Check whether this is a RHEV hypervisor (is vdsm running ?)
for f in glob.glob('/proc/[0-9]*/comm'):
try:
if open(f).read().rstrip() == 'vdsm':
self.facts['virtualization_type'] = 'RHEV'
break
except:
pass
else:
self.facts['virtualization_type'] = 'kvm'
else:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class VirtualSysctlDetectionMixin(object):
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
def detect_virt_product(self, key):
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|Bochs|SmartDC).*', out):
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
elif re.match('.*VMware.*', out):
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'VirtualBox':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'HVM domU':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'Parallels':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
def detect_virt_vendor(self, key):
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
self.facts['virtualization_type'] = 'vmm'
self.facts['virtualization_role'] = 'guest'
class FreeBSDVirtual(Virtual):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
class DragonFlyVirtual(FreeBSDVirtual):
platform = 'DragonFly'
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is a OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
self.detect_virt_product('hw.product')
if self.facts['virtualization_type'] == '':
self.detect_virt_vendor('hw.vendor')
# Check the dmesg if vmm(4) attached, indicating the host is
# capable of virtualization.
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
self.facts['virtualization_type'] = 'vmm'
self.facts['virtualization_role'] = 'host'
class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
platform = 'NetBSD'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
self.detect_virt_product('machdep.dmi.system-product')
if self.facts['virtualization_type'] == '':
self.detect_virt_vendor('machdep.dmi.system-vendor')
if os.path.exists('/dev/xencons'):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = self.module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = self.module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def get_virtual_facts(self):
# Check if it's a zone
zonename = self.module.get_bin_path('zonename')
if zonename:
rc, out, err = self.module.run_command(zonename)
if rc == 0 and out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
modinfo = self.module.get_bin_path('modinfo')
if modinfo:
rc, out, err = self.module.run_command(modinfo)
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'virtuozzo'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
virtinfo = self.module.get_bin_path('virtinfo')
if virtinfo:
# The output of virtinfo is different whether we are on a machine with logical
# domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be unformatted, and the return code is 0 even on error:
# virtinfo can only be run from the global zone
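            # e.g. 'DOMAINROLE|impl=LDoms|control=true|io=true|service=true|root=true'
            # would (roughly) yield virtualization_role 'host (control,io,service,root)'.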
if rc == 0:
try:
for line in out.splitlines():
fields = line.split('|')
if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
self.facts['virtualization_type'] = 'ldom'
self.facts['virtualization_role'] = 'guest'
hostfeatures = []
for field in fields[2:]:
arg = field.split('=')
if( arg[1] == 'true' ):
hostfeatures.append(arg[0])
if( len(hostfeatures) > 0 ):
self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
except ValueError:
pass
else:
smbios = self.module.get_bin_path('smbios')
if not smbios:
return
rc, out, err = self.module.run_command(smbios)
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
elif 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
elif 'KVM' in line:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
class Ohai(Facts):
"""
This is a subclass of Facts for including information gathered from Ohai.
"""
def populate(self):
self.run_ohai()
return self.facts
def run_ohai(self):
ohai_path = self.module.get_bin_path('ohai')
if ohai_path is None:
return
rc, out, err = self.module.run_command(ohai_path)
try:
self.facts.update(json.loads(out))
except:
pass
class Facter(Facts):
"""
This is a subclass of Facts for including information gathered from Facter.
"""
def populate(self):
self.run_facter()
return self.facts
def run_facter(self):
facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
if facter_path is None:
return
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = self.module.run_command(facter_path + " --puppet --json")
try:
self.facts = json.loads(out)
except:
pass
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
except:
# ignore errors as some jails/containers might have readable permissions but not allow reads to proc
# done in 2 blocks for 2.4 compat
pass
return data
def get_uname_version(module):
rc, out, err = module.run_command(['uname', '-v'])
if rc == 0:
return out
return None
def get_partition_uuid(partname):
try:
uuids = os.listdir("/dev/disk/by-uuid")
except OSError:
return
for uuid in uuids:
dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
if dev == ("/dev/" + partname):
return uuid
return None
def get_file_lines(path):
'''get list of lines from file'''
data = get_file_content(path)
if data:
ret = data.splitlines()
else:
ret = []
return ret
def ansible_facts(module, gather_subset):
facts = {}
facts['gather_subset'] = list(gather_subset)
facts.update(Facts(module).populate())
for subset in gather_subset:
facts.update(FACT_SUBSETS[subset](module,
load_on_init=False,
cached_facts=facts).populate())
return facts
def get_all_facts(module):
setup_options = dict(module_setup=True)
# Retrieve module parameters
gather_subset = module.params['gather_subset']
global GATHER_TIMEOUT
GATHER_TIMEOUT = module.params['gather_timeout']
# Retrieve all facts elements
additional_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
additional_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys())))
if exclude:
exclude_subsets.add(subset)
else:
additional_subsets.add(subset)
if not additional_subsets:
additional_subsets.update(VALID_SUBSETS)
additional_subsets.difference_update(exclude_subsets)
# facter and ohai are given a different prefix than other subsets
if 'facter' in additional_subsets:
additional_subsets.difference_update(('facter',))
facter_ds = FACT_SUBSETS['facter'](module, load_on_init=False).populate()
if facter_ds:
for (k, v) in facter_ds.items():
setup_options['facter_%s' % k.replace('-', '_')] = v
if 'ohai' in additional_subsets:
additional_subsets.difference_update(('ohai',))
ohai_ds = FACT_SUBSETS['ohai'](module, load_on_init=False).populate()
if ohai_ds:
for (k, v) in ohai_ds.items():
setup_options['ohai_%s' % k.replace('-', '_')] = v
facts = ansible_facts(module, additional_subsets)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
return setup_result
# Allowed fact subsets for the gather_subset option and the classes that implement them
# Note: this has to be defined at the bottom because it references classes defined earlier in this file
FACT_SUBSETS = dict(
hardware=Hardware,
network=Network,
virtual=Virtual,
ohai=Ohai,
facter=Facter,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
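# --- Editorial sketch (not part of the original module) ----------------------
# Illustrates, standalone, how the '!' include/exclude syntax handled in
# get_all_facts() above resolves to a final set of subsets.  It skips the
# validation/TypeError path for brevity; `valid` mirrors VALID_SUBSETS.
def _example_resolve_gather_subset(gather_subset,
                                   valid=frozenset(('hardware', 'network', 'virtual', 'ohai', 'facter'))):
    include = set()
    exclude = set()
    for subset in gather_subset:
        if subset == 'all':
            include.update(valid)
        elif subset.startswith('!'):
            if subset[1:] == 'all':
                exclude.update(valid)
            else:
                exclude.add(subset[1:])
        else:
            include.add(subset)
    if not include:
        include.update(valid)
    return include - exclude
# _example_resolve_gather_subset(['!facter', '!ohai']) == {'hardware', 'network', 'virtual'}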
| gpl-3.0 |
pyca/pynacl | docs/vectors/python/argondriver.py | 2 | 4700 | #!/usr/bin/python
#
from __future__ import division, print_function
import argparse
import json
import random
import string
import subprocess
import sys
class argonRunner(object):
GOODCHARS = string.ascii_letters + string.digits
def __init__(self, args):
self.exe = args.exe
self.mnsaltlen = args.mnsaltlen
self.mnpwlen = args.mnpwlen
self.mndgstlen = args.mndgstlen
self.mnmem = args.mnmem
self.mniters = args.mniters
self.mxsaltlen = args.mxsaltlen
self.mxpwlen = args.mxpwlen
self.mxdgstlen = args.mxdgstlen
self.mxmem = args.mxmem
self.mxiters = args.mxiters
self.encoded = args.encoded
self.rng = random.SystemRandom()
self.version = args.version
self.construct = args.construct
self.maxcount = args.n
self.count = 0
def _runOnce(self, passwd, salt, dgst_len, maxmem, iters):
argv = [
self.exe,
salt.encode("ascii"),
"-t",
"{0:2d}".format(iters),
"-m",
"{0:2d}".format(maxmem),
"-l",
"{0:3d}".format(dgst_len),
"-v",
self.version,
]
if self.encoded:
argv.append("-e")
mode = "crypt"
else:
argv.append("-r")
mode = "raw"
if self.construct == "argon2i":
argv.append("-i")
elif self.construct == "argon2d":
argv.append("-d")
elif self.construct == "argon2id":
argv.append("-id")
p = subprocess.Popen(
argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
out, err = p.communicate(passwd.encode("ascii"))
return dict(
passwd=passwd,
salt=salt,
dgst_len=dgst_len,
maxmem=2 ** maxmem,
iters=iters,
mode=mode,
pwhash=out.decode("ascii").rstrip(),
construct=self.construct,
)
def _genSalt(self):
sltln = self.rng.randint(self.mnsaltlen, self.mxsaltlen)
chrs = [self.rng.choice(self.GOODCHARS) for x in range(sltln)]
return "".join(chrs)
def _genPw(self):
pwln = self.rng.randint(self.mnpwlen, self.mxpwlen)
chrs = [self.rng.choice(self.GOODCHARS) for x in range(pwln)]
return "".join(chrs)
def __next__(self):
if self.count >= self.maxcount:
raise StopIteration
psw = self._genPw()
slt = self._genSalt()
mem = self.rng.randint(self.mnmem, self.mxmem)
iters = self.rng.randint(self.mniters, self.mxiters)
dgstln = self.rng.randint(self.mndgstlen, self.mxdgstlen)
rs = self._runOnce(psw, slt, dgstln, mem, iters)
self.count += 1
return rs
def __iter__(self):
return self
next = __next__
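# --- Editorial sketch (not part of the original script) -----------------------
# argonRunner is an iterator that shells out to the argon2 reference binary once
# per generated test vector.  The Namespace mirrors the argparse defaults defined
# in the __main__ block below; the executable path is a placeholder assumption.
def _example_generate_vectors(count=3):
    ns = argparse.Namespace(
        exe="/usr/local/bin/argon2",    # placeholder path to the reference CLI
        mnsaltlen=8, mxsaltlen=8,
        mnpwlen=16, mxpwlen=16,
        mndgstlen=64, mxdgstlen=64,
        mnmem=16, mxmem=16,
        mniters=3, mxiters=3,
        encoded=False, version="13", construct="argon2i",
        n=count,
    )
    return list(argonRunner(ns))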
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("-x", "--executable", dest="exe", required=True)
p.add_argument(
"-c", "--construction", dest="construct", type=str, default="argon2i"
)
p.add_argument("-v", "--version", dest="version", type=str, default="13")
p.add_argument(
"-e",
"--encoded",
dest="encoded",
default=False,
action="store_true",
)
p.add_argument(
"-s", "--min-salt-len", dest="mnsaltlen", type=int, default=8
)
p.add_argument(
"-S", "--max-salt-len", dest="mxsaltlen", type=int, default=8
)
p.add_argument(
"-p", "--min-password-len", dest="mnpwlen", type=int, default=16
)
p.add_argument(
"-P", "--max-password-len", dest="mxpwlen", type=int, default=16
)
p.add_argument(
"-l", "--min-digest-len", dest="mndgstlen", type=int, default=64
)
p.add_argument(
"-L", "--max-digest-len", dest="mxdgstlen", type=int, default=64
)
p.add_argument(
"-m", "--min-memory-exponent", dest="mnmem", type=int, default=16
)
p.add_argument(
"-M", "--max-memory-exponent", dest="mxmem", type=int, default=16
)
p.add_argument(
"-t", "--min-time-opscount", dest="mniters", type=int, default=3
)
p.add_argument(
"-T", "--max-time-opscount", dest="mxiters", type=int, default=3
)
p.add_argument("-n", "--count", dest="n", type=int, default=10)
p.add_argument(
"-w",
"--output",
dest="outfile",
default=sys.stdout,
type=argparse.FileType("w"),
)
args = p.parse_args()
res = [x for x in argonRunner(args)]
json.dump(res, args.outfile, indent=2, separators=(",", ": "))
| apache-2.0 |
rytaft/h-store | third_party/python/boto/mturk/question.py | 11 | 15235 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Question(object):
template = "<Question>%(items)s</Question>"
def __init__(self, identifier, content, answer_spec,
is_required=False, display_name=None):
# copy all of the parameters into object attributes
self.__dict__.update(vars())
del self.self
def get_as_params(self, label='Question'):
return { label : self.get_as_xml() }
def get_as_xml(self):
items = [
SimpleField('QuestionIdentifier', self.identifier),
SimpleField('IsRequired', str(self.is_required).lower()),
self.content,
self.answer_spec,
]
if self.display_name is not None:
items.insert(1, SimpleField('DisplayName', self.display_name))
items = ''.join(item.get_as_xml() for item in items)
return self.template % vars()
try:
from lxml import etree
class ValidatingXML(object):
def validate(self):
import urllib2
schema_src_file = urllib2.urlopen(self.schema_url)
schema_doc = etree.parse(schema_src_file)
schema = etree.XMLSchema(schema_doc)
doc = etree.fromstring(self.get_as_xml())
schema.assertValid(doc)
except ImportError:
class ValidatingXML(object):
def validate(self): pass
class ExternalQuestion(ValidatingXML):
"""
An object for constructing an External Question.
"""
schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
template = '<ExternalQuestion xmlns="%(schema_url)s"><ExternalURL>%%(external_url)s</ExternalURL><FrameHeight>%%(frame_height)s</FrameHeight></ExternalQuestion>' % vars()
def __init__(self, external_url, frame_height):
self.external_url = external_url
self.frame_height = frame_height
def get_as_params(self, label='ExternalQuestion'):
return { label : self.get_as_xml() }
def get_as_xml(self):
return self.template % vars(self)
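# --- Editorial example (not part of the original module) ----------------------
# Minimal rendering sketch for ExternalQuestion; the URL below is a placeholder.
def _example_external_question_xml():
    question = ExternalQuestion(external_url="https://example.com/hit", frame_height=400)
    return question.get_as_xml()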
class XMLTemplate:
def get_as_xml(self):
return self.template % vars(self)
class SimpleField(object, XMLTemplate):
"""
A Simple name/value pair that can be easily rendered as XML.
>>> SimpleField('Text', 'A text string').get_as_xml()
'<Text>A text string</Text>'
"""
template = '<%(field)s>%(value)s</%(field)s>'
def __init__(self, field, value):
self.field = field
self.value = value
class Binary(object, XMLTemplate):
template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
def __init__(self, type, subtype, url, alt_text):
self.__dict__.update(vars())
del self.self
class List(list):
"""A bulleted list suitable for OrderedContent or Overview content"""
def get_as_xml(self):
items = ''.join('<ListItem>%s</ListItem>' % item for item in self)
return '<List>%s</List>' % items
class Application(object):
template = "<Application><%(class_)s>%(content)s</%(class_)s></Application>"
parameter_template = "<Name>%(name)s</Name><Value>%(value)s</Value>"
def __init__(self, width, height, **parameters):
self.width = width
self.height = height
self.parameters = parameters
def get_inner_content(self, content):
content.append_field('Width', self.width)
content.append_field('Height', self.height)
for name, value in self.parameters.items():
value = self.parameter_template % vars()
content.append_field('ApplicationParameter', value)
def get_as_xml(self):
content = OrderedContent()
self.get_inner_content(content)
content = content.get_as_xml()
class_ = self.__class__.__name__
return self.template % vars()
class JavaApplet(Application):
def __init__(self, path, filename, *args, **kwargs):
self.path = path
self.filename = filename
super(JavaApplet, self).__init__(*args, **kwargs)
def get_inner_content(self, content):
content = OrderedContent()
content.append_field('AppletPath', self.path)
content.append_field('AppletFilename', self.filename)
super(JavaApplet, self).get_inner_content(content)
class Flash(Application):
def __init__(self, url, *args, **kwargs):
self.url = url
super(Flash, self).__init__(*args, **kwargs)
def get_inner_content(self, content):
content = OrderedContent()
content.append_field('FlashMovieURL', self.url)
super(Flash, self).get_inner_content(content)
class FormattedContent(object, XMLTemplate):
schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd'
template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>'
def __init__(self, content):
self.content = content
class OrderedContent(list):
def append_field(self, field, value):
self.append(SimpleField(field, value))
def get_as_xml(self):
return ''.join(item.get_as_xml() for item in self)
class Overview(OrderedContent):
template = '<Overview>%(content)s</Overview>'
def get_as_params(self, label='Overview'):
return { label : self.get_as_xml() }
def get_as_xml(self):
content = super(Overview, self).get_as_xml()
return self.template % vars()
class QuestionForm(ValidatingXML, list):
"""
From the AMT API docs:
The top-most element of the QuestionForm data structure is a
QuestionForm element. This element contains optional Overview
elements and one or more Question elements. There can be any
number of these two element types listed in any order. The
following example structure has an Overview element and a
Question element followed by a second Overview element and
Question element--all within the same QuestionForm.
::
<QuestionForm xmlns="[the QuestionForm schema URL]">
<Overview>
[...]
</Overview>
<Question>
[...]
</Question>
<Overview>
[...]
</Overview>
<Question>
[...]
</Question>
[...]
</QuestionForm>
QuestionForm is implemented as a list, so to construct a
QuestionForm, simply append Questions and Overviews (with at least
one Question).
"""
schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
xml_template = """<QuestionForm xmlns="%(schema_url)s">%%(items)s</QuestionForm>""" % vars()
def is_valid(self):
return (
any(isinstance(item, Question) for item in self)
and
all(isinstance(item, (Question, Overview)) for item in self)
)
def get_as_xml(self):
assert self.is_valid(), "QuestionForm contains invalid elements"
items = ''.join(item.get_as_xml() for item in self)
return self.xml_template % vars()
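# --- Editorial example (not part of the original module) ----------------------
# Builds a QuestionForm the way the docstring above describes: append Overview
# and Question objects, then render.  Identifiers and text are illustrative;
# FreeTextAnswer is defined further down in this module and is resolved at call
# time, so the sketch is only safe to call after the module has fully loaded.
def _example_question_form_xml():
    overview = Overview()
    overview.append_field('Title', 'Example HIT')
    content = QuestionContent()
    content.append_field('Text', 'What colour is the sky?')
    question = Question(identifier='sky_colour',
                        content=content,
                        answer_spec=AnswerSpecification(FreeTextAnswer()))
    form = QuestionForm([overview, question])
    return form.get_as_xml()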
class QuestionContent(OrderedContent):
template = '<QuestionContent>%(content)s</QuestionContent>'
def get_as_xml(self):
content = super(QuestionContent, self).get_as_xml()
return self.template % vars()
class AnswerSpecification(object):
template = '<AnswerSpecification>%(spec)s</AnswerSpecification>'
def __init__(self, spec):
self.spec = spec
def get_as_xml(self):
spec = self.spec.get_as_xml()
return self.template % vars()
class Constraints(OrderedContent):
template = '<Constraints>%(content)s</Constraints>'
def get_as_xml(self):
content = super(Constraints, self).get_as_xml()
return self.template % vars()
class Constraint(object):
def get_attributes(self):
pairs = zip(self.attribute_names, self.attribute_values)
attrs = ' '.join(
'%s="%d"' % (name,value)
for (name,value) in pairs
if value is not None
)
return attrs
def get_as_xml(self):
attrs = self.get_attributes()
return self.template % vars()
class NumericConstraint(Constraint):
attribute_names = 'minValue', 'maxValue'
template = '<IsNumeric %(attrs)s />'
def __init__(self, min_value=None, max_value=None):
self.attribute_values = min_value, max_value
class LengthConstraint(Constraint):
attribute_names = 'minLength', 'maxLength'
template = '<Length %(attrs)s />'
def __init__(self, min_length=None, max_length=None):
self.attribute_values = min_length, max_length
class RegExConstraint(Constraint):
attribute_names = 'regex', 'errorText', 'flags'
template = '<AnswerFormatRegex %(attrs)s />'
def __init__(self, pattern, error_text=None, flags=None):
self.attribute_values = pattern, error_text, flags
class NumberOfLinesSuggestion(object):
template = '<NumberOfLinesSuggestion>%(num_lines)s</NumberOfLinesSuggestion>'
def __init__(self, num_lines=1):
self.num_lines = num_lines
def get_as_xml(self):
num_lines = self.num_lines
return self.template % vars()
class FreeTextAnswer(object):
template = '<FreeTextAnswer>%(items)s</FreeTextAnswer>'
def __init__(self, default=None, constraints=None, num_lines=None):
self.default = default
if constraints is None: constraints = Constraints()
self.constraints = Constraints(constraints)
self.num_lines = num_lines
def get_as_xml(self):
        items = [self.constraints]  # serialize the constraints passed to __init__ instead of a fresh empty set
if self.default:
items.append(SimpleField('DefaultText', self.default))
if self.num_lines:
items.append(NumberOfLinesSuggestion(self.num_lines))
items = ''.join(item.get_as_xml() for item in items)
return self.template % vars()
class FileUploadAnswer(object):
template = """<FileUploadAnswer><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes></FileUploadAnswer>"""
def __init__(self, min_bytes, max_bytes):
assert 0 <= min_bytes <= max_bytes <= 2*10**9
self.min_bytes = min_bytes
self.max_bytes = max_bytes
def get_as_xml(self):
return self.template % vars(self)
class SelectionAnswer(object):
"""
A class to generate SelectionAnswer XML data structures.
Does not yet implement Binary selection options.
"""
SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>""" # % (count_xml, style_xml, selections_xml)
SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml)
SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type)
STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style)
MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>""" # count
MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>""" # count
ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection'
def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
if style is not None:
if style in SelectionAnswer.ACCEPTED_STYLES:
self.style_suggestion = style
else:
raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))
else:
self.style_suggestion = None
if selections is None:
raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples")
else:
self.selections = selections
self.min_selections = min
self.max_selections = max
assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
#assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections
self.type = type
self.other = other
def get_as_xml(self):
if self.type == 'text':
TYPE_TAG = "Text"
elif self.type == 'binary':
TYPE_TAG = "Binary"
else:
raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))
# build list of <Selection> elements
selections_xml = ""
for tpl in self.selections:
value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG)
selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml)
selections_xml += selection_xml
if self.other:
# add OtherSelection element as xml if available
if hasattr(self.other, 'get_as_xml'):
assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer'
selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
else:
selections_xml += "<OtherSelection />"
if self.style_suggestion is not None:
style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion
else:
style_xml = ""
if self.style_suggestion != 'radiobutton':
count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE %self.min_selections
count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE %self.max_selections
else:
count_xml = ""
ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml)
# return XML
return ret
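# --- Editorial example (not part of the original module) ----------------------
# A radio-button selection answer; selections are (content, identifier) pairs,
# and the values here are illustrative only.
def _example_selection_answer_xml():
    answer = SelectionAnswer(min=1, max=1, style='radiobutton',
                             selections=[('Yes', 'yes'), ('No', 'no')])
    return AnswerSpecification(answer).get_as_xml()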
| gpl-3.0 |
vrv/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | 139 | 8145 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string or unicode
if x.dtype.char not in "SU":
x = np.asarray(x, dtype=np.int64)
return constant_op.constant(x)
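# --- Editorial sketch (not part of the original test file) --------------------
# A plain-Python Levenshtein distance for checking the expected values in the
# tests below by hand; edit_distance(normalize=True) divides this number by the
# length of the truth sequence.
def _reference_levenshtein(hypothesis, truth):
  previous = list(range(len(truth) + 1))
  for i, h in enumerate(hypothesis, start=1):
    current = [i]
    for j, t in enumerate(truth, start=1):
      current.append(min(previous[j] + 1,              # deletion
                         current[j - 1] + 1,           # insertion
                         previous[j - 1] + (h != t)))  # substitution
    previous = current
  return previous[-1]
# For example, _reference_levenshtein("algorithm", "altruistic") == 6.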
class EditDistanceTest(test.TestCase):
def _testEditDistanceST(self,
hypothesis_st,
truth_st,
normalize,
expected_output,
expected_shape,
expected_err_re=None):
edit_distance = array_ops.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
if expected_err_re is None:
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = edit_distance.eval()
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
def _testEditDistance(self,
hypothesis,
truth,
normalize,
expected_output,
expected_err_re=None):
# Shape inference figures out the shape from the shape variables
# Explicit tuple() needed since zip returns an iterator in Python 3.
expected_shape = [
max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
]
# SparseTensorValue inputs.
with ops.Graph().as_default() as g, self.test_session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
hypothesis_st=sparse_tensor.SparseTensorValue(
*[ConstantOf(x) for x in hypothesis]),
truth_st=sparse_tensor.SparseTensorValue(
*[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
# SparseTensor inputs.
with ops.Graph().as_default() as g, self.test_session(g):
# hypothesis and truth are (index, value, shape) tuples
self._testEditDistanceST(
hypothesis_st=sparse_tensor.SparseTensor(
*[ConstantOf(x) for x in hypothesis]),
truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
normalize=normalize,
expected_output=expected_output,
expected_shape=expected_shape,
expected_err_re=expected_err_re)
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
hypothesis_values = [0, 1, 1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [1, 0], [1, 1]]
truth_values = [0, 1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
hypothesis_values = [10, 10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
truth_values = [1, 2, 1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
expected_output = [
[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.0, 1.0]
] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, loss is 1/0 = inf
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesisAndTruth(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [0] # Normalized is 0 because of exact match
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
test.main()
| apache-2.0 |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/contrib/gis/gdal/field.py | 112 | 6735 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"""
This class wraps an OGR Field, and needs to be instantiated
from a Feature object.
"""
def __init__(self, feat, index):
"""
Initializes on the feature object and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
if not fld_ptr:
raise GDALException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
self._double = True
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
# #### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat.ptr, self._index)
def as_int(self, is_64=False):
"Retrieves the Field's value as an integer."
if is_64:
return capi.get_field_as_integer64(self._feat.ptr, self._index)
else:
return capi.get_field_as_integer(self._feat.ptr, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
string = capi.get_field_as_string(self._feat.ptr, self._index)
return force_text(string, encoding=self._feat.encoding, strings_only=True)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(
self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise GDALException('Unable to retrieve date & time information from the field.')
# #### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
name = capi.get_field_name(self.ptr)
return force_text(name, encoding=self._feat.encoding, strings_only=True)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
_double = False
_bit64 = False
@property
def value(self):
"Returns an integer contained in this field."
if self._double:
# If this is really from an OFTReal field with no precision,
# read as a double and cast as Python int (to prevent overflow).
return int(self.as_double())
else:
return self.as_int(self._bit64)
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
pass
class OFTWideString(Field):
pass
class OFTBinary(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, GDALException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTInteger64(OFTInteger):
_bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
pass
class OFTRealList(Field):
pass
class OFTStringList(Field):
pass
class OFTWideStringList(Field):
pass
class OFTInteger64List(Field):
pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
# New 64-bit integer types in GDAL 2
12: OFTInteger64,
13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
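# --- Editorial sketch (not part of the original module) -----------------------
# Field objects are not built directly; they come from feature lookups on a
# layer opened through the higher-level DataSource wrapper.  The file path and
# field name below are placeholder assumptions and GDAL must be installed.
def _example_read_field_values(shapefile_path="/tmp/cities.shp", field_name="NAME"):
    from django.contrib.gis.gdal import DataSource
    layer = DataSource(shapefile_path)[0]
    return [feature[field_name].value for feature in layer]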
| mit |
skevy/django | tests/modeltests/reserved_names/tests.py | 92 | 1671 | import datetime
from django.test import TestCase
from models import Thing
class ReservedNameTests(TestCase):
def generate(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
def test_simple(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
self.assertEqual(t.when, 'a')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
self.assertEqual(u.when, 'h')
def test_order_by(self):
self.generate()
things = [t.when for t in Thing.objects.order_by('when')]
self.assertEqual(things, ['a', 'h'])
def test_fields(self):
self.generate()
v = Thing.objects.get(pk='a')
self.assertEqual(v.join, 'b')
self.assertEqual(v.where, datetime.date(year=2005, month=1, day=1))
def test_dates(self):
self.generate()
resp = Thing.objects.dates('where', 'year')
self.assertEqual(list(resp), [
datetime.datetime(2005, 1, 1, 0, 0),
datetime.datetime(2006, 1, 1, 0, 0),
])
def test_month_filter(self):
self.generate()
self.assertEqual(Thing.objects.filter(where__month=1)[0].when, 'a')
| bsd-3-clause |
aspiers/gertty | gertty/search/parser.py | 1 | 14100 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
import ply.yacc as yacc
from sqlalchemy.sql.expression import and_, or_, not_, select, func
import gertty.db
import gertty.search
from tokenizer import tokens # NOQA
def SearchParser():
precedence = ( # NOQA
('left', 'NOT', 'NEG'),
)
def p_terms(p):
'''expression : list_expr
| paren_expr
| boolean_expr
| negative_expr
| term'''
p[0] = p[1]
def p_list_expr(p):
'''list_expr : expression expression'''
p[0] = and_(p[1], p[2])
def p_paren_expr(p):
'''paren_expr : LPAREN expression RPAREN'''
p[0] = p[2]
def p_boolean_expr(p):
'''boolean_expr : expression AND expression
| expression OR expression'''
if p[2].lower() == 'and':
p[0] = and_(p[1], p[3])
elif p[2].lower() == 'or':
p[0] = or_(p[1], p[3])
else:
raise gertty.search.SearchSyntaxError("Boolean %s not recognized" % p[2])
def p_negative_expr(p):
'''negative_expr : NOT expression
| NEG expression'''
p[0] = not_(p[2])
def p_term(p):
'''term : age_term
| change_term
| owner_term
| reviewer_term
| commit_term
| project_term
| project_key_term
| branch_term
| topic_term
| ref_term
| label_term
| message_term
| comment_term
| has_term
| is_term
| status_term
| file_term
| limit_term
| op_term'''
p[0] = p[1]
def p_string(p):
'''string : SSTRING
| DSTRING
| USTRING'''
p[0] = p[1]
def p_age_term(p):
'''age_term : OP_AGE NUMBER string'''
now = datetime.datetime.utcnow()
delta = p[2]
unit = p[3]
if unit in ['seconds', 'second', 'sec', 's']:
pass
elif unit in ['minutes', 'minute', 'min', 'm']:
delta = delta * 60
elif unit in ['hours', 'hour', 'hr', 'h']:
delta = delta * 60 * 60
elif unit in ['days', 'day', 'd']:
delta = delta * 60 * 60 * 24
elif unit in ['weeks', 'week', 'w']:
delta = delta * 60 * 60 * 24 * 7
elif unit in ['months', 'month', 'mon']:
delta = delta * 60 * 60 * 24 * 30
elif unit in ['years', 'year', 'y']:
delta = delta * 60 * 60 * 24 * 365
p[0] = gertty.db.change_table.c.updated < (now-datetime.timedelta(seconds=delta))
def p_change_term(p):
'''change_term : OP_CHANGE CHANGE_ID
| OP_CHANGE NUMBER'''
if type(p[2]) == int:
p[0] = gertty.db.change_table.c.number == p[2]
else:
p[0] = gertty.db.change_table.c.change_id == p[2]
def p_owner_term(p):
'''owner_term : OP_OWNER string'''
if p[2] == 'self':
username = p.parser.username
p[0] = gertty.db.account_table.c.username == username
else:
p[0] = or_(gertty.db.account_table.c.username == p[2],
gertty.db.account_table.c.email == p[2],
gertty.db.account_table.c.name == p[2])
def p_reviewer_term(p):
'''reviewer_term : OP_REVIEWER string
| OP_REVIEWER NUMBER'''
filters = []
filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.approval_table.c.account_key == gertty.db.account_table.c.key)
try:
number = int(p[2])
except:
number = None
if number is not None:
filters.append(gertty.db.account_table.c.id == number)
elif p[2] == 'self':
username = p.parser.username
filters.append(gertty.db.account_table.c.username == username)
else:
filters.append(or_(gertty.db.account_table.c.username == p[2],
gertty.db.account_table.c.email == p[2],
gertty.db.account_table.c.name == p[2]))
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
def p_commit_term(p):
'''commit_term : OP_COMMIT string'''
filters = []
filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.revision_table.c.commit == p[2])
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
def p_project_term(p):
'''project_term : OP_PROJECT string'''
if p[2].startswith('^'):
p[0] = func.matches(p[2], gertty.db.project_table.c.name)
else:
p[0] = gertty.db.project_table.c.name == p[2]
def p_project_key_term(p):
'''project_key_term : OP_PROJECT_KEY NUMBER'''
p[0] = gertty.db.change_table.c.project_key == p[2]
def p_branch_term(p):
'''branch_term : OP_BRANCH string'''
if p[2].startswith('^'):
p[0] = func.matches(p[2], gertty.db.change_table.c.branch)
else:
p[0] = gertty.db.change_table.c.branch == p[2]
def p_topic_term(p):
'''topic_term : OP_TOPIC string'''
if p[2].startswith('^'):
p[0] = func.matches(p[2], gertty.db.change_table.c.topic)
else:
p[0] = gertty.db.change_table.c.topic == p[2]
def p_ref_term(p):
'''ref_term : OP_REF string'''
if p[2].startswith('^'):
p[0] = func.matches(p[2], 'refs/heads/'+gertty.db.change_table.c.branch)
else:
p[0] = gertty.db.change_table.c.branch == p[2][len('refs/heads/'):]
label_re = re.compile(r'(?P<label>[a-zA-Z0-9_-]+([a-zA-Z]|((?<![-+])[0-9])))'
r'(?P<operator>[<>]?=?)(?P<value>[-+]?[0-9]+)'
r'($|,(user=)?(?P<user>\S+))')
def p_label_term(p):
'''label_term : OP_LABEL string'''
args = label_re.match(p[2])
label = args.group('label')
op = args.group('operator') or '='
value = int(args.group('value'))
user = args.group('user')
filters = []
filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.approval_table.c.category == label)
if op == '=':
filters.append(gertty.db.approval_table.c.value == value)
elif op == '>=':
filters.append(gertty.db.approval_table.c.value >= value)
elif op == '<=':
filters.append(gertty.db.approval_table.c.value <= value)
if user is not None:
filters.append(gertty.db.approval_table.c.account_key == gertty.db.account_table.c.key)
if user == 'self':
filters.append(gertty.db.account_table.c.username == p.parser.username)
else:
filters.append(
or_(gertty.db.account_table.c.username == user,
gertty.db.account_table.c.email == user,
gertty.db.account_table.c.name == user))
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
def p_message_term(p):
'''message_term : OP_MESSAGE string'''
filters = []
filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.revision_table.c.message.like('%%%s%%' % p[2]))
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
def p_comment_term(p):
'''comment_term : OP_COMMENT string'''
filters = []
filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.revision_table.c.message == p[2])
revision_select = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
filters = []
filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.comment_table.c.revision_key == gertty.db.revision_table.c.key)
filters.append(gertty.db.comment_table.c.message == p[2])
comment_select = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = or_(gertty.db.change_table.c.key.in_(comment_select),
gertty.db.change_table.c.key.in_(revision_select))
def p_has_term(p):
'''has_term : OP_HAS string'''
#TODO: implement star
if p[2] == 'draft':
filters = []
filters.append(gertty.db.revision_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.message_table.c.revision_key == gertty.db.revision_table.c.key)
filters.append(gertty.db.message_table.c.draft == True)
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
else:
raise gertty.search.SearchSyntaxError('Syntax error: has:%s is not supported' % p[2])
def p_is_term(p):
'''is_term : OP_IS string'''
#TODO: implement draft
username = p.parser.username
if p[2] == 'reviewed':
filters = []
filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.approval_table.c.value != 0)
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
elif p[2] == 'open':
p[0] = gertty.db.change_table.c.status.notin_(['MERGED', 'ABANDONED'])
elif p[2] == 'closed':
p[0] = gertty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
elif p[2] == 'submitted':
p[0] = gertty.db.change_table.c.status == 'SUBMITTED'
elif p[2] == 'merged':
p[0] = gertty.db.change_table.c.status == 'MERGED'
elif p[2] == 'abandoned':
p[0] = gertty.db.change_table.c.status == 'ABANDONED'
elif p[2] == 'owner':
p[0] = gertty.db.account_table.c.username == username
elif p[2] == 'starred':
p[0] = gertty.db.change_table.c.starred == True
elif p[2] == 'held':
# A gertty extension
p[0] = gertty.db.change_table.c.held == True
elif p[2] == 'reviewer':
filters = []
filters.append(gertty.db.approval_table.c.change_key == gertty.db.change_table.c.key)
filters.append(gertty.db.approval_table.c.account_key == gertty.db.account_table.c.key)
filters.append(gertty.db.account_table.c.username == username)
s = select([gertty.db.change_table.c.key], correlate=False).where(and_(*filters))
p[0] = gertty.db.change_table.c.key.in_(s)
elif p[2] == 'watched':
p[0] = gertty.db.project_table.c.subscribed == True
else:
raise gertty.search.SearchSyntaxError('Syntax error: is:%s is not supported' % p[2])
def p_file_term(p):
'''file_term : OP_FILE string'''
if p[2].startswith('^'):
p[0] = and_(or_(func.matches(p[2], gertty.db.file_table.c.path),
func.matches(p[2], gertty.db.file_table.c.old_path)),
                        gertty.db.file_table.c.status != None)  # SQLAlchemy NULL check; `is not None` would always evaluate True here
else:
p[0] = and_(or_(gertty.db.file_table.c.path == p[2],
gertty.db.file_table.c.old_path == p[2]),
                        gertty.db.file_table.c.status != None)  # SQLAlchemy NULL check; `is not None` would always evaluate True here
def p_status_term(p):
'''status_term : OP_STATUS string'''
if p[2] == 'open':
p[0] = gertty.db.change_table.c.status.notin_(['MERGED', 'ABANDONED'])
elif p[2] == 'closed':
p[0] = gertty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
else:
p[0] = gertty.db.change_table.c.status == p[2].upper()
def p_limit_term(p):
'''limit_term : OP_LIMIT NUMBER'''
# TODO: Implement this. The sqlalchemy limit call needs to be
# applied to the query operation and so can not be returned as
# part of the production here. The information would need to
# be returned out-of-band. In the mean time, since limits are
# not as important in gertty, make this a no-op for now so
# that it does not produce a syntax error.
p[0] = (True == True)
def p_op_term(p):
'op_term : OP'
raise SyntaxError()
def p_error(p):
if p:
raise gertty.search.SearchSyntaxError('Syntax error at "%s" in search string "%s" (col %s)' % (
p.lexer.lexdata[p.lexpos:], p.lexer.lexdata, p.lexpos))
else:
raise gertty.search.SearchSyntaxError('Syntax error: EOF in search string')
return yacc.yacc(debug=0, write_tables=0)
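# --- Editorial sketch (not part of the original module) -----------------------
# The parser built above turns a Gerrit-style query into a SQLAlchemy boolean
# clause.  The tokenizer import name and the username are assumptions: gertty
# normally wires these together in gertty.search.SearchCompiler.
def _example_compile_query(query='status:open project:^openstack/.* owner:self'):
    from gertty.search.tokenizer import SearchTokenizer  # assumed lexer factory
    search_parser = SearchParser()
    search_parser.username = 'example-user'   # consulted by owner:self / is:owner
    return search_parser.parse(query, lexer=SearchTokenizer())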
| apache-2.0 |
stackforge/python-solumclient | solumclient/common/base.py | 1 | 3402 | # Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib import parse as urlparse
from solumclient.common.apiclient import base
from solumclient.common.apiclient import exceptions
class FindMixin(object):
"""Just `findone()`/`findall()` methods.
    Note: this is largely a copy of apiclient.base.ManagerWithFind,
    but without the inheritance, and with find() renamed to findone()
    so that it does not clash with the Manager find() method.
"""
def findone(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list(app_id=kwargs.get('app_id')):
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
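# --- Editorial sketch (not part of the original module) -----------------------
# FindMixin is meant to be mixed into a concrete manager alongside CrudManager
# (defined below); findone()/findall() then filter the result of list() on the
# client side.  The manager, resource class and field names are illustrative.
#
#     class AppManager(CrudManager, FindMixin):
#         resource_class = App
#         collection_key = 'apps'
#         key = 'app'
#
#     app = AppManager(http_client).findone(name='my-app')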
class CrudManager(base.CrudManager):
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % urlparse.urlencode(kwargs) if kwargs else '',
})
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs))
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs), kwargs)
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._put(
self.build_url(**kwargs), params)
def patch(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
params.pop('base_url')
return self._patch(
self.build_url(**kwargs), params)
| apache-2.0 |
vgrem/Office365-REST-Python-Client | office365/runtime/client_object.py | 1 | 6459 | import copy
from office365.runtime.client_value import ClientValue
from office365.runtime.odata.json_light_format import JsonLightFormat
from office365.runtime.odata.odata_query_options import QueryOptions
class ClientObject(object):
def __init__(self, context, resource_path=None, parent_collection=None, namespace=None):
"""
        Base client object that defines the named properties and relationships of an entity
:type parent_collection: office365.runtime.client_object_collection.ClientObjectCollection or None
:type resource_path: office365.runtime.resource_path.ResourcePath or None
:type context: office365.runtime.client_runtime_context.ClientRuntimeContext
:type namespace: str
"""
self._properties = {}
self._changed_properties = []
self._entity_type_name = None
self._query_options = QueryOptions()
self._parent_collection = parent_collection
self._context = context
self._resource_path = resource_path
self._namespace = namespace
def clear(self):
self._changed_properties = []
def execute_query(self):
self.context.execute_query()
return self
def execute_query_retry(self, max_retry=5, timeout_secs=5, success_callback=None, failure_callback=None):
self.context.execute_query_retry(max_retry=max_retry,
timeout_secs=timeout_secs,
success_callback=success_callback,
failure_callback=failure_callback)
return self
def build_request(self):
return self.context.build_request()
def get(self):
self.context.load(self)
return self
def is_property_available(self, name):
"""Returns a Boolean value that indicates whether the specified property has been retrieved or set.
:param str name: A Property name
"""
        return name in self.properties
def expand(self, names):
"""
:type names: list[str]
"""
self.query_options.expand = names
return self
def select(self, names):
"""
:param list[str] names:
:return:
"""
self.query_options.select = names
return self
def remove_from_parent_collection(self):
if self._parent_collection is None:
return
self._parent_collection.remove_child(self)
def get_property(self, name):
"""
Gets property value
:param str name: property name
"""
normalized_name = name[0].lower() + name[1:]
return getattr(self, normalized_name, self._properties.get(name, None))
def set_property(self, name, value, persist_changes=True):
"""Sets property value
:param str name: Property name
:param any value: Property value
:param bool persist_changes: Persist changes
"""
if persist_changes:
self._changed_properties.append(name)
prop_type = self.get_property(name)
if isinstance(prop_type, ClientObject) or isinstance(prop_type, ClientValue) and value is not None:
if isinstance(value, list):
[prop_type.set_property(i, v, persist_changes) for i, v in enumerate(value)]
self._properties[name] = prop_type
elif isinstance(value, dict):
[prop_type.set_property(k, v, persist_changes) for k, v in value.items()]
self._properties[name] = prop_type
else:
self._properties[name] = value
else:
self._properties[name] = value
return self
def ensure_property(self, name, action=None):
"""
Ensures if property is loaded
:type action: () -> None
:type name: str
"""
return self.ensure_properties([name], action)
def ensure_properties(self, names, action):
"""
        Ensures that the specified properties are loaded before the action is invoked
:type action: () -> None
:type names: str or list[str]
"""
def _exec_action():
if callable(action):
action()
names_to_include = [n for n in names if not self.is_property_available(n)]
if len(names_to_include) > 0:
qry = self.context.load(self, names_to_include)
self.context.execute_after_query(qry, _exec_action)
else:
_exec_action()
return self
def clone_object(self):
result = copy.deepcopy(self)
result._context = self.context
return result
@property
def entity_type_name(self):
if self._entity_type_name is None:
if self._namespace is None:
self._entity_type_name = type(self).__name__
else:
self._entity_type_name = ".".join([self._namespace, type(self).__name__])
return self._entity_type_name
@property
def resource_url(self):
"""Generate resource Url
:rtype: str or None
"""
if self.resource_path:
url = self.context.service_root_url() + self.resource_path.to_url()
if not self.query_options.is_empty:
url = url + "?" + self._query_options.to_url()
return url
return None
@property
def context(self):
return self._context
@property
def resource_path(self):
return self._resource_path
@property
def query_options(self):
return self._query_options
@property
def properties(self):
return self._properties
@property
def parent_collection(self):
return self._parent_collection
def to_json(self, json_format=None):
"""
:type json_format: office365.runtime.odata.odata_json_format.ODataJsonFormat or None
"""
json = dict((k, self.get_property(k)) for k in self.properties if k in self._changed_properties)
for k, v in json.items():
if isinstance(v, ClientObject) or isinstance(v, ClientValue):
json[k] = v.to_json(json_format)
if isinstance(json_format, JsonLightFormat) and json_format.is_verbose and self.entity_type_name is not None:
json[json_format.metadata_type_tag_name] = {'type': self.entity_type_name}
return json
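# --- Editorial sketch (not part of the original module) -----------------------
# ClientObject instances are normally obtained from a client context rather than
# constructed directly; ensure_properties() above only queues a query for the
# properties that are not loaded yet.  The context class, site URL and property
# names are placeholder assumptions for illustration.
#
#     from office365.sharepoint.client_context import ClientContext
#     ctx = ClientContext(site_url, auth_context)
#     web = ctx.web
#     web.ensure_properties(["Title", "Url"],
#                           lambda: print(web.properties["Title"]))
#     ctx.execute_query()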
| mit |
mtrbean/scipy | scipy/stats/mstats_basic.py | 8 | 68598 | """
An extension of scipy.stats.stats to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ?
# TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__all__ = ['argstoarray',
'betai',
'chisquare','count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
'zmap', 'zscore'
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
from scipy._lib.six import iteritems
import itertools
import warnings
from collections import namedtuple
from . import stats
from . import distributions
import scipy.special as special
from . import futil
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" (%s <> %s)" % (na, nb))
return (a, b, na)
def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
"""
marr = ma.compressed(arr)
if not marr.size:
return (np.array(0), np.array(0))
(v1, v2, n) = futil.dfreps(ma.array(ma.compressed(arr), copy=True))
return (v1[:n], v2[:n])
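# --- Editorial example (not part of the original module) ----------------------
# find_repeats() works on the compressed (unmasked) data only; a hedged sketch:
#
#     >>> from scipy.stats import mstats
#     >>> x = np.ma.array([1, 1, 2, 2, 2, 5], mask=[0, 0, 0, 0, 0, 1])
#     >>> mstats.find_repeats(x)      # the masked 5 is ignored
#     (array([ 1.,  2.]), array([2, 3]))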
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values.
Parameters
----------
x : sequence
Sequence of data on which to counts the ties
use_missing : bool, optional
Whether to consider missing values as tied.
Returns
-------
count_tied_groups : dict
Returns a dictionary (nb of ties: nb of groups).
Examples
--------
>>> from scipy.stats import mstats
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
>>> mstats.count_tied_groups(z)
{2: 1, 3: 2}
In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
>>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
>>> mstats.count_tied_groups(z)
{2: 2, 3: 1}
>>> z[[1,-1]] = np.ma.masked
>>> mstats.count_tied_groups(z, use_missing=True)
{2: 2, 3: 1}
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : bool, optional
Whether the masked values have a rank of 0 (False) or equal to the
average rank of the unmasked values (True).
"""
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data == r).filled(False)
rk[condition] = rk[condition].mean()
return rk
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
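# Illustrative sketch (not in the original source, assuming mstats is imported
# from scipy.stats): tied values receive the average of the ranks they would
# otherwise occupy.
# >>> mstats.rankdata([40, 10, 30, 10]).tolist()
# [4.0, 1.5, 3.0, 1.5]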
def mode(a, axis=0):
a, axis = _chk_asarray(a, axis)
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
else:
not_masked_indices = ma.flatnotmasked_edges(a)
first_not_masked_index = not_masked_indices[0]
return (a[first_not_masked_index], 1)
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
return ModeResult(*output)
mode.__doc__ = stats.mode.__doc__
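# Hedged example (editor's addition): for a 1-D input the most frequent value
# and its count are each returned as a length-1 array along the reduced axis.
# >>> res = mstats.mode([1, 1, 2, 3])
# >>> float(res.mode[0]), float(res.count[0])
# (1.0, 2.0)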
@np.deprecate(message="mstats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead.")
def betai(a, b, x):
return _betai(a, b, x)
betai.__doc__ = stats.betai.__doc__
def _betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as `x` increases, so does
`y`. Negative correlations imply that as `x` increases, `y` decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1-D array_like
Input
y : 1-D array_like
Input
Returns
-------
pearsonr : float
Pearson's correlation coefficient, 2-tailed p-value.
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
(mx, my) = (x.mean(), y.mean())
(xm, ym) = (x-mx, y-my)
r_num = ma.add.reduce(xm*ym)
r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
r = r_num / r_den
# Presumably, if r > 1, then it is only some small artifact of floating
# point arithmetic.
r = min(r, 1.0)
r = max(r, -1.0)
df = n - 2
if r is masked or abs(r) == 1.0:
prob = 0.
else:
t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
prob = _betai(0.5*df, 0.5, df/(df + t_squared))
return r, prob
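# Hedged usage sketch (illustrative data): perfectly linearly related inputs
# give r == 1.0, and the p-value is forced to 0 whenever |r| == 1.
# >>> mstats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
# (1.0, 0.0)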
def spearmanr(x, y, use_ties=True):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonic
relationship between two datasets. Unlike the Pearson correlation, the
Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as `x` increases, so does `y`. Negative correlations imply that as `x`
increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the
corresponding value in `y` is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : array_like
The length of `x` must be > 2.
y : array_like
The length of `y` must be > 2.
use_ties : bool, optional
Whether the correction for ties should be computed.
Returns
-------
correlation : float
Spearman correlation coefficient
pvalue : float
2-tailed p-value.
References
----------
[CRCProbStat2000] section 14.7
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
df = n-2
if df < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
rankx = rankdata(x)
ranky = rankdata(y)
dsq = np.add.reduce((rankx-ranky)**2)
# Tie correction
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12.
corr_y = np.sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12.
else:
corr_x = corr_y = 0
denom = n*(n**2 - 1)/6.
if corr_x != 0 or corr_y != 0:
rho = denom - dsq - corr_x - corr_y
rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
else:
rho = 1. - dsq/denom
t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
if t is masked:
prob = 0.
else:
prob = _betai(0.5*df, 0.5, df/(df + t * t))
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
return SpearmanrResult(rho, prob)
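# Illustrative sketch (not part of the original file): a monotonic but
# nonlinear relation still yields a perfect rank correlation.
# >>> res = mstats.spearmanr([1, 2, 3, 4, 5], [1, 4, 9, 16, 25])
# >>> res.correlation, res.pvalue
# (1.0, 0.0)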
def kendalltau(x, y, use_ties=True, use_missing=False):
"""
Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
x : sequence
First data list (for example, time).
y : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
Returns
-------
correlation : float
Kendall tau
pvalue : float
Approximate 2-sided p-value.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
n -= m.sum()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if n < 2:
return KendalltauResult(np.nan, np.nan)
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
var_s = n*(n-1)*(2*n+5)
if use_ties:
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties))
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties))
v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
prob = special.erfc(abs(z)/np.sqrt(2))
return KendalltauResult(tau, prob)
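# Hedged example (illustrative numbers): five fully concordant pairs give
# tau == 1.0; the normal-approximation p-value is roughly 0.014 for n = 5.
# >>> res = mstats.kendalltau([1, 2, 3, 4, 5], [1, 4, 9, 16, 25])
# >>> res.correlation
# 1.0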
def kendalltau_seasonal(x):
"""
Computes a multivariate Kendall's rank correlation tau, for seasonal data.
Parameters
----------
x : 2-D ndarray
Array of seasonal data, with seasons in columns.
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
S_szn = np.sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
S_tot = S_szn.sum()
n_tot = x.count()
ties = count_tied_groups(x.compressed())
corr_ties = np.sum(v*k*(k-1) for (k,v) in iteritems(ties))
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
corr_j = np.sum(v*k*(k-1) for (k,v) in iteritems(ties_j))
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
for i in range(n))
covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
def pointbiserialr(x, y):
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = _betai(0.5*df, 0.5, df/(df+t*t))
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
return PointbiserialrResult(rpb, prob)
if stats.pointbiserialr.__doc__:
pointbiserialr.__doc__ = stats.pointbiserialr.__doc__ + genmissingvaldoc
def linregress(*args):
"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if len(args) == 1:
# Input is a single 2-D array containing x and y
args = ma.array(args[0], copy=True)
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:, 0]
y = args[:, 1]
else:
# Input is two 1-D arrays
x = ma.array(args[0]).flatten()
y = ma.array(args[1]).flatten()
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
slope, intercept, r, prob, sterrest = stats.linregress(x.data[~m],
y.data[~m])
else:
# All data is masked
return None, None, None, None, None
else:
slope, intercept, r, prob, sterrest = stats.linregress(x.data, y.data)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
if stats.linregress.__doc__:
linregress.__doc__ = stats.linregress.__doc__ + genmissingvaldoc
def theilslopes(y, x=None, alpha=0.95):
y = ma.asarray(y).flatten()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `stats.theilslopes`
return stats.theilslopes(y, x, alpha=alpha)
theilslopes.__doc__ = stats.theilslopes.__doc__
def sen_seasonal_slopes(x):
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return szn_medslopes, medslope
def ttest_1samp(a, popmean, axis=0):
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return (np.nan, np.nan)
x = a.mean(axis=axis)
v = a.var(axis=axis, ddof=1)
n = a.count(axis=axis)
df = n - 1.
svar = ((n - 1) * v) / df
t = (x - popmean) / ma.sqrt(svar / n)
prob = _betai(0.5*df, 0.5, df/(df + t*t))
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
return Ttest_1sampResult(t, prob)
ttest_1samp.__doc__ = stats.ttest_1samp.__doc__
ttest_onesamp = ttest_1samp
def ttest_ind(a, b, axis=0):
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
df = n1 + n2 - 2.
svar = ((n1-1)*v1+(n2-1)*v2) / df
t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
t = ma.filled(t, 1) # replace NaN t-values with 1.0
probs = _betai(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape)
return Ttest_indResult(t, probs.squeeze())
ttest_ind.__doc__ = stats.ttest_ind.__doc__
def ttest_rel(a, b, axis=0):
a, b, axis = _chk2_asarray(a, b, axis)
if len(a) != len(b):
raise ValueError('unequal length arrays')
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.count(axis)
df = (n-1.0)
d = (a-b).astype('d')
denom = ma.sqrt((n*ma.add.reduce(d*d,axis) - ma.add.reduce(d,axis)**2) / df)
t = ma.add.reduce(d, axis) / denom
t = ma.filled(t, 1)
probs = _betai(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze()
return Ttest_relResult(t, probs)
ttest_rel.__doc__ = stats.ttest_rel.__doc__
# stats.chisquare works with masked arrays, so we don't need to
# implement it here.
# For backwards compatibility, stats.chisquare is included in
# the stats.mstats namespace.
chisquare = stats.chisquare
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The Mann-Whitney statistic.
pvalue : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u, prob)
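# Illustrative sketch (editor's addition): with completely separated samples
# the reported statistic is the smaller U (here 0); the p-value, roughly 0.03
# for this tiny example, comes from the normal approximation.
# >>> res = mstats.mannwhitneyu([1, 2, 3, 4], [5, 6, 7, 8])
# >>> res.statistic
# 0.0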
def kruskalwallis(*args):
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
T = 1. - np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
df = len(output) - 1
prob = stats.distributions.chi2.sf(H, df)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
return KruskalResult(H, prob)
kruskal = kruskalwallis
kruskalwallis.__doc__ = stats.kruskal.__doc__
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
def ks_twosamp_old(data1, data2):
""" Computes the Kolmogorov-Smirnov statistic on 2 samples.
Returns
-------
KS D-value, p-value
"""
(data1, data2) = [ma.asarray(d).compressed() for d in (data1,data2)]
return stats.ks_2samp(data1,data2)
@np.deprecate(message="mstats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : ndarray
Input data
threshmin : {None, float}, optional
Lower threshold. If None, set to the minimum value.
threshmax : {None, float}, optional
Upper threshold. If None, set to the maximum value.
newval : {0, float}, optional
Value outside the thresholds.
Returns
-------
threshold : ndarray
Returns `a`, with values less than `threshmin` and values greater than
`threshmax` replaced with `newval`.
"""
a = ma.array(a, copy=True)
mask = np.zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin).filled(False)
if threshmax is not None:
mask |= (a > threshmax).filled(False)
a[mask] = newval
return a
def trima(a, limits=None, inclusive=(True,True)):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : array_like
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
will be masked. A limit of None indicates an open interval.
inclusive : (bool, bool) tuple, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
"""
a = ma.asarray(a)
a.unshare_mask()
if (limits is None) or (limits == (None, None)):
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
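# Hedged example (illustrative only): with the default inclusive flags, values
# outside the closed interval [2, 4] are masked.
# >>> mstats.trima([1, 2, 3, 4, 5], limits=(2, 4)).compressed().tolist()
# [2, 3, 4]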
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""
Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming is
n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on
the left (right) end should be truncated (True) or rounded (False) to
integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - np.round(n*up_limit)
a[idx[upidx:]] = masked
return a
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(bool, bool) tuple}, optional
If `relative` is False, tuple indicating whether values exactly equal
to the absolute limits are allowed.
If `relative` is True, tuple indicating whether the number of data
being masked on each side should be truncated (True) or rounded
(False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
>>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
>>> trim(z,(3,8))
[--,--, 3, 4, 5, 6, 7, 8,--,--]
>>> trim(z,(0.1,0.2),relative=True)
[--, 2, 3, 4, 5, 6, 7, 8,--,--]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__ is not None:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""
Trims the smallest and largest data values.
Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
``int(proportiontocut * n)`` largest values of data along the given axis,
where n is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : float, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is ``(1 - 2*proportiontocut) * n``.
Default is 0.2.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
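# Illustrative sketch (assuming the default proportiontocut=0.2): int(0.2 * 10)
# = 2 values are masked on each end of a 10-element sample, leaving 6 unmasked.
# >>> mstats.trimboth(list(range(10))).count()
# 6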
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""
Trims the data by masking values from one tail.
Parameters
----------
data : array_like
Data to trim.
proportiontocut : float, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
``(1 - proportiontocut) * n``. Default is 0.2.
tail : {'left','right'}, optional
If 'left' the `proportiontocut` lowest values will be masked.
If 'right' the `proportiontocut` highest values will be masked.
Default is 'left'.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be truncated (True) or rounded (False). Default is
(True, True).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened. Default is None.
Returns
-------
trimtail : ndarray
Returned array of same shape as `data` with masked tail values.
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""
Returns the standard error of the trimmed mean along the given axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
If n is the number of unmasked data before trimming, the values
smaller than ``n * limits[0]`` and the values larger than
``n * limits[1]`` are masked, and the total number of unmasked
data after trimming is ``n * (1.-sum(limits))``. In each case,
the value of one limit can be set to None to indicate an open interval.
If `limits` is None, no trimming is performed.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to trim.
Returns
-------
trimmed_stde : scalar or ndarray
"""
def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
"Returns the standard error of the trimmed mean for a 1D input data."
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - np.round(n*up_limit)
a[idx[upidx:]] = masked
a[idx[:lowidx]] = a[idx[lowidx]]
a[idx[upidx:]] = a[idx[upidx-1]]
winstd = a.std(ddof=1)
return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if (axis is None):
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def tmean(a, limits=None, inclusive=(True,True)):
return trima(a, limits=limits, inclusive=inclusive).mean()
tmean.__doc__ = stats.tmean.__doc__
def tvar(a, limits=None, inclusive=(True,True)):
a = a.astype(float).ravel()
if limits is None:
n = (~a.mask).sum() # todo: better way to do that?
r = trima(a, limits=limits, inclusive=inclusive).var() * (n/(n-1.))
else:
raise ValueError('mstats.tvar() with limits is not implemented yet')
return r
tvar.__doc__ = stats.tvar.__doc__
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
tmin.__doc__ = stats.tmin.__doc__
def tmax(a, upperlimit, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
tmax.__doc__ = stats.tmax.__doc__
def tsem(a, limits=None, inclusive=(True,True)):
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std(ddof=1)/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var(ddof=1))
return sd / np.sqrt(am.count())
tsem.__doc__ = stats.tsem.__doc__
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
axis=None):
"""Returns a Winsorized version of the input array.
The (limits[0])th lowest values are set to the (limits[0])th percentile,
and the (limits[1])th highest values are set to the (1 - limits[1])th
percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming
is n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be truncated (True) or rounded (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
Notes
-----
This function is applied to reduce the effect of possibly spurious outliers
by limiting the extreme values.
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
n = a.count()
idx = a.argsort()
if low_limit:
if low_include:
lowidx = int(low_limit * n)
else:
lowidx = np.round(low_limit * n)
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n * up_limit)
else:
upidx = n - np.round(n * up_limit)
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
# We are going to modify a: better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
if limits is None:
return a
if (not isinstance(limits, tuple)) and isinstance(limits, float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
upinc)
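# Hedged usage sketch (illustrative data): with limits=(0.1, 0.1) on ten points,
# the single smallest value is replaced by the second smallest and the single
# largest value by the second largest.
# >>> mstats.winsorize([1, 2, 3, 4, 5, 6, 7, 8, 9, 100], limits=(0.1, 0.1)).tolist()
# [2, 2, 3, 4, 5, 6, 7, 8, 9, 9]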
def moment(a, moment=1, axis=0):
a, axis = _chk_asarray(a, axis)
if moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - ma.expand_dims(a.mean(axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return s.mean(axis)
moment.__doc__ = stats.moment.__doc__
def variation(a, axis=0):
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
variation.__doc__ = stats.variation.__doc__
def skew(a, axis=0, bias=True):
a, axis = _chk_asarray(a,axis)
n = a.count(axis)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
skew.__doc__ = stats.skew.__doc__
def kurtosis(a, axis=0, fisher=True, bias=True):
a, axis = _chk_asarray(a, axis)
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
n = a.count(axis)
can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
kurtosis.__doc__ = stats.kurtosis.__doc__
def describe(a, axis=0, ddof=0):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Data array
axis : int or None, optional
Axis along which to calculate statistics. Default 0. If None,
compute over the whole array `a`.
ddof : int, optional
degree of freedom (default 0); note that default ddof is different
from the same routine in stats.describe
Returns
-------
nobs : int
Size of the data (discarding missing values).
minmax : (int, int)
min, max
mean : float
arithmetic mean
variance : float
unbiased variance
skewness : float
biased skewness
kurtosis : float
biased kurtosis
Examples
--------
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
(array(3),
(0, 2),
1.0,
1.0,
masked_array(data = 0.0,
mask = False,
fill_value = 1e+20)
,
-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
mm = (ma.minimum.reduce(a), ma.maximum.reduce(a))
m = a.mean(axis)
v = a.var(axis, ddof=ddof)
sk = skew(a, axis)
kurt = kurtosis(a, axis)
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
return DescribeResult(n, mm, m, v, sk, kurt)
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
median along the given axis. Masked values are discarded.
Parameters
----------
data : ndarray
Data to trim.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
def skewtest(a, axis=0):
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y == 0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
skewtest.__doc__ = stats.skewtest.__doc__
def kurtosistest(a, axis=0):
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
if np.min(n) < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % np.min(n))
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
if np.ma.isMaskedArray(denom):
# For multi-dimensional array input
denom[denom < 0] = masked
elif denom < 0:
denom = masked
term2 = ma.power((1-2.0/A)/denom,1/3.0)
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
kurtosistest.__doc__ = stats.kurtosistest.__doc__
def normaltest(a, axis=0):
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
return NormaltestResult(k2, stats.distributions.chi2.sf(k2, 2))
normaltest.__doc__ = stats.normaltest.__doc__
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
Sample quantiles are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
where ``x[j]`` is the j-th order statistic, and gamma is a function of
``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
``g = n*p + m - j``.
Reinterpreting the above equations to compare to **R** leads to the
equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
Typical values of (alphap,betap) are:
- (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
(**R** type 4)
- (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
(**R** type 5)
- (0,0) : ``p(k) = k/(n+1)`` :
(**R** type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
(**R** type 7, **R** default)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x.
(**R** type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed
(**R** type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
alphap : float, optional
Plotting positions parameter, default is 0.4.
betap : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple, optional
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Notes
-----
This formulation is very similar to that of **R**, except for the calculation
of ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
separately for each type.
References
----------
.. [1] *R* statistical software: http://www.r-project.org/
.. [2] *R* ``quantile`` function:
http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
[ 47., 15., 2.],
[ 49., 36., 3.],
[ 15., 39., 4.],
[ 42., 40., -999.],
[ 41., 41., -999.],
[ 7., -999., -999.],
[ 39., -999., -999.],
[ 43., -999., -999.],
[ 40., -999., -999.],
[ 36., -999., -999.]])
>>> mquantiles(data, axis=0, limit=(0, 50))
array([[ 19.2 , 14.6 , 1.45],
[ 40. , 37.5 , 2.5 ],
[ 42.8 , 40.05, 3.55]])
>>> data[:, 2] = -999.
>>> mquantiles(data, axis=0, limit=(0, 50))
masked_array(data =
[[19.2 14.6 --]
[40.0 37.5 --]
[42.8 40.05 --]],
mask =
[[False False True]
[False False True]
[False False True]],
fill_value = 1e+20)
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = masked
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantiles.
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
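# Illustrative sketch (reusing the data from the mquantiles example above):
# the score at the 50th percentile is the median.
# >>> a = [6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]
# >>> float(mstats.scoreatpercentile(a, 50))
# 40.0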
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- `alpha` and `beta` are two parameters.
Typical values for `alpha` and `beta` are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
(n + 1.0 - alpha - beta))
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
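# Hedged example (editor's addition): with the default alpha = beta = 0.4 the
# positions are (i - 0.4) / (n + 0.2) for ranks i = 1..n, reported in the
# original order of the data.
# >>> mstats.plotting_positions([3, 1, 2]).tolist()
# [0.8125, 0.1875, 0.5]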
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in ``*args`` is one level of a factor. If an `f_oneway()` run on
the transformed data is found significant, the variances are unequal. From
Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
@np.deprecate(message="mstats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(data, axis=0):
"""Calculates the signal-to-noise ratio, as the ratio of the mean over
standard deviation along the given axis.
Parameters
----------
data : sequence
Input data
axis : {0, int}, optional
Axis along which to compute. If None, the computation is performed
on a flat version of the array.
"""
data = ma.array(data, copy=False)
m = data.mean(axis)
sd = data.std(axis, ddof=0)
return m/sd
def sem(a, axis=0, ddof=1):
"""
Calculates the standard error of the mean of the input array.
Also sometimes called standard error of measurement.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
If axis is None, ravel `a` first. If axis is an integer, this will be
the axis over which to operate. Defaults to 0.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` changed in scipy 0.15.0 to be consistent with
`stats.sem` as well as with the most common definition used (like in the R
documentation).
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
return s
zmap = stats.zmap
zscore = stats.zscore
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
one per treatment group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
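# Illustrative sketch (hypothetical group data, not from the source): each
# positional argument is one treatment group.
# >>> res = mstats.f_oneway([6, 8, 7], [4, 5, 3], [10, 11, 12])
# >>> res.pvalue < 0.01 # clearly separated group means
# True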
@np.deprecate(message="mstats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
ER = ma.array(ER, copy=False, ndmin=2)
EF = ma.array(EF, copy=False, ndmin=2)
if ma.getmask(ER).any() or ma.getmask(EF).any():
raise NotImplementedError("Not implemented when the inputs "
"have missing data")
lmbda = np.linalg.det(EF) / np.linalg.det(ER)
q = ma.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
q = ma.filled(q, 1)
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
Each input is considered a given group. Ideally, the number of treatments
among each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns
-------
statistic : float
the test statistic.
pvalue : float
the associated p-value.
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq,
stats.distributions.chi2.sf(chisq, k-1))
| bsd-3-clause |
bpramod/azure-linux-extensions | Diagnostic/Utils/XmlUtil.py | 5 | 2369 | #!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import xml.etree.ElementTree as ET
def setXmlValue(xml,path,property,value,selector=[]):
elements = xml.findall(path)
for element in elements:
if selector and element.get(selector[0])!=selector[1]:
continue
if not property:
element.text = value
elif not element.get(property) or len(element.get(property))==0 :
element.set(property,value)
def getXmlValue(xml,path,property):
element = xml.find(path)
if element is not None:
return element.get(property)
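# Hedged usage sketch (illustrative only; the element and attribute names are
# hypothetical): setXmlValue writes the element text when no attribute name is
# given, and getXmlValue reads an attribute back.
# >>> doc = ET.fromstring('<Config><Item name="a">1</Item></Config>')
# >>> setXmlValue(doc, 'Item', None, '2', selector=('name', 'a'))
# >>> doc.find('Item').text
# '2'
# >>> getXmlValue(doc, 'Item', 'name')
# 'a'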
def addElement(xml,path,el,selector=[],addOnlyOnce=False):
elements = xml.findall(path)
for element in elements:
if selector and element.get(selector[0])!=selector[1]:
continue
element.append(el)
if addOnlyOnce:
return
def createElement(schema):
return ET.fromstring(schema)
def removeElement(tree, parent_path, removed_element_name):
parents = tree.findall(parent_path)
for parent in parents:
element = parent.find(removed_element_name)
while element is not None:
parent.remove(element)
element = parent.find(removed_element_name) | apache-2.0 |
magopian/olympia | apps/reviews/feeds.py | 15 | 2542 | from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from tower import ugettext as _
from amo import helpers
from addons.models import Addon, Review
import urllib
class ReviewsRss(Feed):
addon = None
def get_object(self, request, addon_id=None):
"""Get the Addon for which we are about to output
the RSS feed of its Reviews"""
self.addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
return self.addon
def title(self, addon):
"""Title for the feed"""
return _(u'Reviews for %s') % addon.name
def link(self, addon):
"""Link for the feed"""
return helpers.absolutify(helpers.url('home'))
def description(self, addon):
"""Description for the feed"""
return _('Review History for this Addon')
def items(self, addon):
"""Return the Reviews for this Addon to be output as RSS <item>'s"""
qs = (Review.objects.valid().filter(addon=addon).order_by('-created'))
return qs.all()[:30]
def item_link(self, review):
"""Link for a particular review (<item><link>)"""
return helpers.absolutify(helpers.url('addons.reviews.detail',
self.addon.slug,
review.id))
def item_title(self, review):
"""Title for particular review (<item><title>)"""
tag_line = rating = ''
if getattr(review, 'rating', None):
# L10n: This describes the number of stars given out of 5
rating = _('Rated %d out of 5 stars') % review.rating
if getattr(review, 'title', None):
tag_line = review.title
divider = ' : ' if rating and tag_line else ''
return u'%s%s%s' % (rating, divider, tag_line)
def item_description(self, review):
"""Description for particular review (<item><description>)"""
return review.body
def item_guid(self, review):
"""Guid for a particuar review (<item><guid>)"""
guid_url = helpers.absolutify(helpers.url('addons.reviews.list',
self.addon.slug))
return guid_url + urllib.quote(str(review.id))
def item_author_name(self, review):
"""Author for a particuar review (<item><dc:creator>)"""
return review.user.name
def item_pubdate(self, review):
"""Pubdate for a particuar review (<item><pubDate>)"""
return review.created
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_usages_operations.py | 1 | 5201 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.UsagesListResult"]:
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
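# A hedged usage sketch (not part of this generated module): given an
# authenticated NetworkManagementClient for api-version 2019-11-01, the pager
# returned by list() is consumed with async iteration; the attribute names on
# the yielded items follow the Usage model and are assumptions here.
#   async for usage in client.usages.list(location="westus"):
#       print(usage.name.value, usage.current_value, usage.limit)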
| mit |
akurtakov/Pydev | plugins/org.python.pydev.core/pysrc/_pydev_bundle/pydev_monkey.py | 1 | 23007 | # License: EPL
import os
import sys
import traceback
try:
xrange
except:
xrange = range
#===============================================================================
# Things that are dependent on having the pydevd debugger
#===============================================================================
def log_debug(msg):
from _pydev_bundle import pydev_log
pydev_log.debug(msg)
def log_error_once(msg):
from _pydev_bundle import pydev_log
pydev_log.error_once(msg)
pydev_src_dir = os.path.dirname(os.path.dirname(__file__))
def _get_python_c_args(host, port, indC, args, setup):
host_literal = "'" + host + "'" if host is not None else 'None'
return ("import sys; sys.path.append(r'%s'); import pydevd; "
"pydevd.settrace(host=%s, port=%s, suspend=False, trace_only_current_thread=False, patch_multiprocessing=True); "
"from pydevd import SetupHolder; SetupHolder.setup = %s; %s"
) % (
pydev_src_dir,
host_literal,
port,
setup,
args[indC + 1])
def _get_host_port():
import pydevd
host, port = pydevd.dispatch()
return host, port
def _is_managed_arg(arg):
if arg.endswith('pydevd.py'):
return True
return False
def _on_forked_process():
import pydevd
pydevd.threadingCurrentThread().__pydevd_main_thread = True
pydevd.settrace_forked()
def _on_set_trace_for_new_thread(global_debugger):
if global_debugger is not None:
global_debugger.SetTrace(global_debugger.trace_dispatch, global_debugger.frame_eval_func, global_debugger.dummy_trace_dispatch)
#===============================================================================
# Things related to monkey-patching
#===============================================================================
def is_python(path):
if path.endswith("'") or path.endswith('"'):
path = path[1:len(path) - 1]
filename = os.path.basename(path).lower()
for name in ['python', 'jython', 'pypy']:
if filename.find(name) != -1:
return True
return False
def remove_quotes_from_args(args):
if sys.platform == "win32":
new_args = []
for x in args:
if len(x) > 1 and x.startswith('"') and x.endswith('"'):
x = x[1:-1]
new_args.append(x)
return new_args
else:
return args
def quote_args(args):
if sys.platform == "win32":
quoted_args = []
for x in args:
if x.startswith('"') and x.endswith('"'):
quoted_args.append(x)
else:
if ' ' in x:
x = x.replace('"', '\\"')
quoted_args.append('"%s"' % x)
else:
quoted_args.append(x)
return quoted_args
else:
return args
def get_c_option_index(args):
"""
Get index of "-c" argument and check if it's interpreter's option
:param args: list of arguments
:return: index of "-c" if it's an interpreter's option and -1 if it doesn't exist or program's option
"""
try:
ind_c = args.index('-c')
except ValueError:
return -1
else:
for i in range(1, ind_c):
if not args[i].startswith('-'):
# there is an arg without "-" before "-c", so it's not an interpreter's option
return -1
return ind_c
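# Illustrative behaviour of get_c_option_index (example values only):
#   get_c_option_index(['python', '-u', '-c', 'print(1)'])    # -> 2, interpreter option
#   get_c_option_index(['python', 'script.py', '-c', 'foo'])  # -> -1, "-c" belongs to the program
#   get_c_option_index(['python', 'script.py'])               # -> -1, no "-c" present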
def patch_args(args):
try:
log_debug("Patching args: %s"% str(args))
args = remove_quotes_from_args(args)
from pydevd import SetupHolder
import sys
new_args = []
if len(args) == 0:
return args
if is_python(args[0]):
ind_c = get_c_option_index(args)
if ind_c != -1:
host, port = _get_host_port()
if port is not None:
new_args.extend(args)
new_args[ind_c + 1] = _get_python_c_args(host, port, ind_c, args, SetupHolder.setup)
return quote_args(new_args)
else:
# Check for Python ZIP Applications and don't patch the args for them.
# Assumes the first non `-<flag>` argument is what we need to check.
# There's probably a better way to determine this but it works for most cases.
continue_next = False
for i in range(1, len(args)):
if continue_next:
continue_next = False
continue
arg = args[i]
if arg.startswith('-'):
# Skip the next arg too if this flag expects a value.
continue_next = arg in ['-m', '-W', '-X']
continue
if arg.rsplit('.')[-1] in ['zip', 'pyz', 'pyzw']:
log_debug('Executing a PyZip, returning')
return args
break
new_args.append(args[0])
else:
log_debug("Process is not python, returning.")
return args
i = 1
# Original args should be something as:
# ['X:\\pysrc\\pydevd.py', '--multiprocess', '--print-in-debugger-startup',
# '--vm_type', 'python', '--client', '127.0.0.1', '--port', '56352', '--file', 'x:\\snippet1.py']
from _pydevd_bundle.pydevd_command_line_handling import setup_to_argv
original = setup_to_argv(SetupHolder.setup) + ['--file']
while i < len(args):
if args[i] == '-m':
# Always insert at pos == 1 (i.e.: pydevd "--module" --multiprocess ...)
original.insert(1, '--module')
else:
if args[i].startswith('-'):
new_args.append(args[i])
else:
break
i += 1
# Note: undoing https://github.com/Elizaveta239/PyDev.Debugger/commit/053c9d6b1b455530bca267e7419a9f63bf51cddf
# (i >= len(args) instead of i < len(args))
# in practice it'd raise an exception here and would return original args, which is not what we want... providing
# a proper fix for https://youtrack.jetbrains.com/issue/PY-9767 elsewhere.
if i < len(args) and _is_managed_arg(args[i]): # no need to add pydevd twice
return args
for x in original:
new_args.append(x)
if x == '--file':
break
while i < len(args):
new_args.append(args[i])
i += 1
return quote_args(new_args)
except:
traceback.print_exc()
return args
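# Illustrative sketch of the rewrite performed by patch_args (all values are
# hypothetical): with a debugger listening on 127.0.0.1:5678, a spawned command
#   ['python', 'child.py', '--foo']
# is rewritten roughly into
#   ['python', 'pydevd.py', '--client', '127.0.0.1', '--port', '5678', '--file', 'child.py', '--foo']
# so the new interpreter connects back to the same debugger session.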
def str_to_args_windows(args):
# see http:#msdn.microsoft.com/en-us/library/a1y7w461.aspx
result = []
DEFAULT = 0
ARG = 1
IN_DOUBLE_QUOTE = 2
state = DEFAULT
backslashes = 0
buf = ''
args_len = len(args)
for i in xrange(args_len):
ch = args[i]
if (ch == '\\'):
backslashes += 1
continue
elif (backslashes != 0):
if ch == '"':
while backslashes >= 2:
backslashes -= 2
buf += '\\'
if (backslashes == 1):
if (state == DEFAULT):
state = ARG
buf += '"'
backslashes = 0
continue
# else fall through to switch
else:
# false alarm, treat passed backslashes literally...
if (state == DEFAULT):
state = ARG
while backslashes > 0:
backslashes -= 1
buf += '\\'
# fall through to switch
if ch in (' ', '\t'):
if (state == DEFAULT):
# skip
continue
elif (state == ARG):
state = DEFAULT
result.append(buf)
buf = ''
continue
if state in (DEFAULT, ARG):
if ch == '"':
state = IN_DOUBLE_QUOTE
else:
state = ARG
buf += ch
elif state == IN_DOUBLE_QUOTE:
if ch == '"':
if (i + 1 < args_len and args[i + 1] == '"'):
# Undocumented feature in Windows:
# Two consecutive double quotes inside a double-quoted argument are interpreted as
# a single double quote.
buf += '"'
i += 1
elif len(buf) == 0:
# empty string on Windows platform. Account for bug in constructor of
# JDK's java.lang.ProcessImpl.
result.append("\"\"")
state = DEFAULT
else:
state = ARG
else:
buf += ch
else:
raise RuntimeError('Illegal condition')
if len(buf) > 0 or state != DEFAULT:
result.append(buf)
return result
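# Examples of the Windows splitting rules implemented above (illustrative only):
#   str_to_args_windows('python -c "import os"')     # -> ['python', '-c', 'import os']
#   str_to_args_windows(r'prog "a \"quoted\" arg"')  # -> ['prog', 'a "quoted" arg']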
def patch_arg_str_win(arg_str):
args = str_to_args_windows(arg_str)
# Fix https://youtrack.jetbrains.com/issue/PY-9767 (args may be empty)
if not args or not is_python(args[0]):
return arg_str
arg_str = ' '.join(patch_args(args))
log_debug("New args: %s" % arg_str)
return arg_str
def monkey_patch_module(module, funcname, create_func):
if hasattr(module, funcname):
original_name = 'original_' + funcname
if not hasattr(module, original_name):
setattr(module, original_name, getattr(module, funcname))
setattr(module, funcname, create_func(original_name))
def monkey_patch_os(funcname, create_func):
monkey_patch_module(os, funcname, create_func)
def warn_multiproc():
log_error_once(
"pydev debugger: New process is launching (breakpoints won't work in the new process).\n"
"pydev debugger: To debug that process please enable 'Attach to subprocess automatically while debugging?' option in the debugger settings.\n")
def create_warn_multiproc(original_name):
def new_warn_multiproc(*args):
import os
warn_multiproc()
return getattr(os, original_name)(*args)
return new_warn_multiproc
def create_execl(original_name):
def new_execl(path, *args):
"""
os.execl(path, arg0, arg1, ...)
os.execle(path, arg0, arg1, ..., env)
os.execlp(file, arg0, arg1, ...)
os.execlpe(file, arg0, arg1, ..., env)
"""
import os
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(path, *args)
return new_execl
def create_execv(original_name):
def new_execv(path, args):
"""
os.execv(path, args)
os.execvp(file, args)
"""
import os
send_process_created_message()
return getattr(os, original_name)(path, patch_args(args))
return new_execv
def create_execve(original_name):
"""
os.execve(path, args, env)
os.execvpe(file, args, env)
"""
def new_execve(path, args, env):
import os
send_process_created_message()
return getattr(os, original_name)(path, patch_args(args), env)
return new_execve
def create_spawnl(original_name):
def new_spawnl(mode, path, *args):
"""
os.spawnl(mode, path, arg0, arg1, ...)
os.spawnlp(mode, file, arg0, arg1, ...)
"""
import os
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(mode, path, *args)
return new_spawnl
def create_spawnv(original_name):
def new_spawnv(mode, path, args):
"""
os.spawnv(mode, path, args)
os.spawnvp(mode, file, args)
"""
import os
send_process_created_message()
return getattr(os, original_name)(mode, path, patch_args(args))
return new_spawnv
def create_spawnve(original_name):
"""
os.spawnve(mode, path, args, env)
os.spawnvpe(mode, file, args, env)
"""
def new_spawnve(mode, path, args, env):
import os
send_process_created_message()
return getattr(os, original_name)(mode, path, patch_args(args), env)
return new_spawnve
def create_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_fork_exec(args, *other_args):
import _posixsubprocess # @UnresolvedImport
args = patch_args(args)
send_process_created_message()
return getattr(_posixsubprocess, original_name)(args, *other_args)
return new_fork_exec
def create_warn_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_warn_fork_exec(*args):
try:
import _posixsubprocess
warn_multiproc()
return getattr(_posixsubprocess, original_name)(*args)
except:
pass
return new_warn_fork_exec
def create_CreateProcess(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(app_name, cmd_line, *args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
send_process_created_message()
return getattr(_subprocess, original_name)(app_name, patch_arg_str_win(cmd_line), *args)
return new_CreateProcess
def create_CreateProcessWarnMultiproc(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(*args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
warn_multiproc()
return getattr(_subprocess, original_name)(*args)
return new_CreateProcess
def create_fork(original_name):
def new_fork():
import os
# A simple fork will result in a new python process
is_new_python_process = True
frame = sys._getframe()
while frame is not None:
if frame.f_code.co_name == '_execute_child' and 'subprocess' in frame.f_code.co_filename:
# If we're actually in subprocess.Popen creating a child, it may
# result in something which is not a Python process, (so, we
# don't want to connect with it in the forked version).
executable = frame.f_locals.get('executable')
if executable is not None:
is_new_python_process = False
if is_python(executable):
is_new_python_process = True
break
frame = frame.f_back
frame = None # Just make sure we don't hold on to it.
child_process = getattr(os, original_name)() # fork
if not child_process:
if is_new_python_process:
_on_forked_process()
else:
if is_new_python_process:
send_process_created_message()
return child_process
return new_fork
def send_process_created_message():
from _pydevd_bundle.pydevd_comm import get_global_debugger
debugger = get_global_debugger()
if debugger is not None:
debugger.send_process_created_message()
def patch_new_process_functions():
# os.execl(path, arg0, arg1, ...)
# os.execle(path, arg0, arg1, ..., env)
# os.execlp(file, arg0, arg1, ...)
# os.execlpe(file, arg0, arg1, ..., env)
# os.execv(path, args)
# os.execve(path, args, env)
# os.execvp(file, args)
# os.execvpe(file, args, env)
monkey_patch_os('execl', create_execl)
monkey_patch_os('execle', create_execl)
monkey_patch_os('execlp', create_execl)
monkey_patch_os('execlpe', create_execl)
monkey_patch_os('execv', create_execv)
monkey_patch_os('execve', create_execve)
monkey_patch_os('execvp', create_execv)
monkey_patch_os('execvpe', create_execve)
# os.spawnl(mode, path, ...)
# os.spawnle(mode, path, ..., env)
# os.spawnlp(mode, file, ...)
# os.spawnlpe(mode, file, ..., env)
# os.spawnv(mode, path, args)
# os.spawnve(mode, path, args, env)
# os.spawnvp(mode, file, args)
# os.spawnvpe(mode, file, args, env)
monkey_patch_os('spawnl', create_spawnl)
monkey_patch_os('spawnle', create_spawnl)
monkey_patch_os('spawnlp', create_spawnl)
monkey_patch_os('spawnlpe', create_spawnl)
monkey_patch_os('spawnv', create_spawnv)
monkey_patch_os('spawnve', create_spawnve)
monkey_patch_os('spawnvp', create_spawnv)
monkey_patch_os('spawnvpe', create_spawnve)
if sys.platform != 'win32':
monkey_patch_os('fork', create_fork)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcess)
def patch_new_process_functions_with_warning():
monkey_patch_os('execl', create_warn_multiproc)
monkey_patch_os('execle', create_warn_multiproc)
monkey_patch_os('execlp', create_warn_multiproc)
monkey_patch_os('execlpe', create_warn_multiproc)
monkey_patch_os('execv', create_warn_multiproc)
monkey_patch_os('execve', create_warn_multiproc)
monkey_patch_os('execvp', create_warn_multiproc)
monkey_patch_os('execvpe', create_warn_multiproc)
monkey_patch_os('spawnl', create_warn_multiproc)
monkey_patch_os('spawnle', create_warn_multiproc)
monkey_patch_os('spawnlp', create_warn_multiproc)
monkey_patch_os('spawnlpe', create_warn_multiproc)
monkey_patch_os('spawnv', create_warn_multiproc)
monkey_patch_os('spawnve', create_warn_multiproc)
monkey_patch_os('spawnvp', create_warn_multiproc)
monkey_patch_os('spawnvpe', create_warn_multiproc)
if sys.platform != 'win32':
monkey_patch_os('fork', create_warn_multiproc)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_warn_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcessWarnMultiproc)
class _NewThreadStartupWithTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
self.global_debugger = self.get_debugger()
def get_debugger(self):
from _pydevd_bundle.pydevd_comm import get_global_debugger
return get_global_debugger()
def __call__(self):
_on_set_trace_for_new_thread(self.global_debugger)
global_debugger = self.global_debugger
if global_debugger is not None and global_debugger.thread_analyser is not None:
# we can detect start_new_thread only here
try:
from pydevd_concurrency_analyser.pydevd_concurrency_logger import log_new_thread
log_new_thread(global_debugger)
except:
sys.stderr.write("Failed to detect new thread for visualization")
return self.original_func(*self.args, **self.kwargs)
class _NewThreadStartupWithoutTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
def __call__(self):
return self.original_func(*self.args, **self.kwargs)
_UseNewThreadStartup = _NewThreadStartupWithTrace
def _get_threading_modules_to_patch():
threading_modules_to_patch = []
try:
import thread as _thread
except:
import _thread
threading_modules_to_patch.append(_thread)
return threading_modules_to_patch
threading_modules_to_patch = _get_threading_modules_to_patch()
def patch_thread_module(thread):
if getattr(thread, '_original_start_new_thread', None) is None:
_original_start_new_thread = thread._original_start_new_thread = thread.start_new_thread
else:
_original_start_new_thread = thread._original_start_new_thread
class ClassWithPydevStartNewThread:
def pydev_start_new_thread(self, function, args=(), kwargs={}):
'''
We need to replace the original thread.start_new_thread with this function so that threads started
through it and not through the threading module are properly traced.
'''
return _original_start_new_thread(_UseNewThreadStartup(function, args, kwargs), ())
# This is a hack for the situation where the thread.start_new_thread is declared inside a class, such as the one below
# class F(object):
# start_new_thread = thread.start_new_thread
#
# def start_it(self):
# self.start_new_thread(self.function, args, kwargs)
# So, if it's an already bound method, calling self.start_new_thread won't really receive a different 'self' -- it
# does work in the default case because in builtins self isn't passed either.
pydev_start_new_thread = ClassWithPydevStartNewThread().pydev_start_new_thread
try:
# We need to replace the original thread.start_new_thread with this function so that threads started through
# it and not through the threading module are properly traced.
thread.start_new_thread = pydev_start_new_thread
thread.start_new = pydev_start_new_thread
except:
pass
def patch_thread_modules():
for t in threading_modules_to_patch:
patch_thread_module(t)
def undo_patch_thread_modules():
for t in threading_modules_to_patch:
try:
t.start_new_thread = t._original_start_new_thread
except:
pass
try:
t.start_new = t._original_start_new_thread
except:
pass
def disable_trace_thread_modules():
'''
Can be used to temporarily stop tracing threads created with thread.start_new_thread.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithoutTrace
def enable_trace_thread_modules():
'''
Can be used to start tracing threads created with thread.start_new_thread again.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithTrace
def get_original_start_new_thread(threading_module):
try:
return threading_module._original_start_new_thread
except:
return threading_module.start_new_thread
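# A hedged usage sketch (not executed here): once patch_thread_modules() has been
# applied, tracing of threads created via thread.start_new_thread can be paused
# temporarily:
#   disable_trace_thread_modules()
#   try:
#       pass  # spawn helper threads that should not be traced
#   finally:
#       enable_trace_thread_modules()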
| epl-1.0 |
dreamsxin/kbengine | kbe/src/lib/python/Lib/test/test_crypt.py | 91 | 1084 | from test import support
import unittest
crypt = support.import_module('crypt')
class CryptTestCase(unittest.TestCase):
def test_crypt(self):
c = crypt.crypt('mypassword', 'ab')
if support.verbose:
print('Test encryption: ', c)
def test_salt(self):
self.assertEqual(len(crypt._saltchars), 64)
for method in crypt.methods:
salt = crypt.mksalt(method)
self.assertEqual(len(salt),
method.salt_chars + (3 if method.ident else 0))
def test_saltedcrypt(self):
for method in crypt.methods:
pw = crypt.crypt('assword', method)
self.assertEqual(len(pw), method.total_size)
pw = crypt.crypt('assword', crypt.mksalt(method))
self.assertEqual(len(pw), method.total_size)
def test_methods(self):
# Guarantee that METHOD_CRYPT is the last method in crypt.methods.
self.assertTrue(len(crypt.methods) >= 1)
self.assertEqual(crypt.METHOD_CRYPT, crypt.methods[-1])
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
hydroshare/hydroshare_temp | hs_core/tests/api/native/test_publish_resource.py | 1 | 2615 | __author__ = 'Tian Gan'
## unit test for publish_resource() from resource.py
## Note:
# It can't test whether the edit_groups are empty yet,
# as edit_groups can't be assigned any value when creating a resource
from django.test import TestCase
from django.contrib.auth.models import User, Group
from hs_core import hydroshare
from hs_core.models import GenericResource
class TestPublishResource(TestCase):
def setUp(self):
# create two users
self.user1 = hydroshare.create_account(
'[email protected]',
username='creator',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[]
)
self.user2 = hydroshare.create_account(
'[email protected]',
username='creator2',
first_name='Creator2_FirstName',
last_name='Creator2_LastName',
superuser=False,
groups=[]
)
# create a group
self.group = hydroshare.create_group(
'Test group',
members=[],
owners=[self.user1]
)
# create a resource
self.res = hydroshare.create_resource(
'GenericResource',
self.user1,
'Test Resource',
#edit_groups=[self.group],
edit_users=[self.user1, self.user2]
)
def tearDown(self):
User.objects.all().delete()
Group.objects.all().delete()
GenericResource.objects.all().delete()
def test_publish_resource(self):
# publish resource
hydroshare.publish_resource(self.res.short_id)
self.pub_res = hydroshare.get_resource_by_shortkey(self.res.short_id)
# test publish state
self.assertTrue(
self.pub_res.published_and_frozen,
msg='The resource is not published and frozen'
)
# test frozen state
self.assertTrue(
self.pub_res.frozen,
msg='The resource is not frozen'
)
# test if resource has edit users
self.assertListEqual(
list(self.pub_res.edit_users.all()),
[],
msg='edit users list is not empty'
)
# test if resource has edit groups
self.assertListEqual(
list(self.pub_res.edit_groups.all()),
[],
msg='edit groups is not empty'
)
# test if doi is assigned
self.assertIsNotNone(
self.pub_res.doi,
msg='No doi is assigned with the published resource.'
)
| bsd-3-clause |
espadrine/opera | chromium/src/third_party/scons-2.0.1/engine/SCons/Tool/packaging/rpm.py | 61 | 13485 | """SCons.Tool.Packaging.rpm
The rpm packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/rpm.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import SCons.Builder
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
**kw):
# initialize the rpm tool
SCons.Tool.Tool('rpm').generate(env)
bld = env['BUILDERS']['Rpm']
# Generate a UserError whenever the target name has been set explicitly,
# since rpm does not allow for controlling it. This is detected by
# checking if the target has been set to the default by the Package()
# Environment function.
if str(target[0])!="%s-%s"%(NAME, VERSION):
raise UserError( "Setting target is not supported for rpm." )
else:
# This should be overridable from the construction environment,
# which it is by using ARCHITECTURE=.
# Guessing based on what os.uname() returns at least allows it
# to work for both i386 and x86_64 Linux systems.
archmap = {
'i686' : 'i386',
'i586' : 'i386',
'i486' : 'i386',
}
buildarchitecture = os.uname()[4]
buildarchitecture = archmap.get(buildarchitecture, buildarchitecture)
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
fmt = '%s-%s-%s.%s.rpm'
srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)
target = [ srcrpm, binrpm ]
# get the correct arguments into the kw hash
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# if no "SOURCE_URL" tag is given add a default one.
if 'SOURCE_URL' not in kw:
#kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
# mangle the source and target list for the rpmbuild
env = OverrideEnvironment(env, kw)
target, source = stripinstallbuilder(target, source, env)
target, source = addspecfile(target, source, env)
target, source = collectintargz(target, source, env)
# now call the rpm builder to actually build the packet.
return bld(env, target, source, **kw)
def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package; until this is changed, this
# hack needs to stay here to try to pack all sources in.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
#sources = [s for s in sources if not (s in target)]
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
#sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
spec_file = lambda s: str(s).rfind('.spec') != -1
sources.extend( list(filter(spec_file, source)) )
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
#tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
#tarball = env['SOURCE_URL'].split('/')[-1]
tarball = env['SOURCE_URL'].split('/')[-1]
except KeyError, e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball)
def addspecfile(target, source, env):
specfile = "%s-%s" % (env['NAME'], env['VERSION'])
bld = SCons.Builder.Builder(action = build_specfile,
suffix = '.spec',
target_factory = SCons.Node.FS.File)
source.extend(bld(env, specfile, source))
return (target,source)
def build_specfile(target, source, env):
""" Builds a RPM specfile from a dictionary with string metadata and
by analyzing a tree of nodes.
"""
file = open(target[0].abspath, 'w')
str = ""
try:
file.write( build_specfile_header(env) )
file.write( build_specfile_sections(env) )
file.write( build_specfile_filesection(env, source) )
file.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
""" Builds the sections of a rpm specfile.
"""
str = ""
mandatory_sections = {
'DESCRIPTION' : '\n%%description\n%s\n\n', }
str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
optional_sections = {
'DESCRIPTION_' : '%%description -l %s\n%s\n\n',
'CHANGELOG' : '%%changelog\n%s\n\n',
'X_RPM_PREINSTALL' : '%%pre\n%s\n\n',
'X_RPM_POSTINSTALL' : '%%post\n%s\n\n',
'X_RPM_PREUNINSTALL' : '%%preun\n%s\n\n',
'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
'X_RPM_VERIFY' : '%%verify\n%s\n\n',
# These are for internal use but could possibly be overridden
'X_RPM_PREP' : '%%prep\n%s\n\n',
'X_RPM_BUILD' : '%%build\n%s\n\n',
'X_RPM_INSTALL' : '%%install\n%s\n\n',
'X_RPM_CLEAN' : '%%clean\n%s\n\n',
}
# Default prep, build, install and clean rules
# TODO: optimize those build steps, to not compile the project a second time
if 'X_RPM_PREP' not in spec:
spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
if 'X_RPM_BUILD' not in spec:
spec['X_RPM_BUILD'] = 'mkdir "$RPM_BUILD_ROOT"'
if 'X_RPM_INSTALL' not in spec:
spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
if 'X_RPM_CLEAN' not in spec:
spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
return str
def build_specfile_header(spec):
""" Builds all section but the %file of a rpm specfile
"""
str = ""
# first the mandatory sections
mandatory_header_fields = {
'NAME' : '%%define name %s\nName: %%{name}\n',
'VERSION' : '%%define version %s\nVersion: %%{version}\n',
'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
'X_RPM_GROUP' : 'Group: %s\n',
'SUMMARY' : 'Summary: %s\n',
'LICENSE' : 'License: %s\n', }
str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )
# now the optional tags
optional_header_fields = {
'VENDOR' : 'Vendor: %s\n',
'X_RPM_URL' : 'Url: %s\n',
'SOURCE_URL' : 'Source: %s\n',
'SUMMARY_' : 'Summary(%s): %s\n',
'X_RPM_DISTRIBUTION' : 'Distribution: %s\n',
'X_RPM_ICON' : 'Icon: %s\n',
'X_RPM_PACKAGER' : 'Packager: %s\n',
'X_RPM_GROUP_' : 'Group(%s): %s\n',
'X_RPM_REQUIRES' : 'Requires: %s\n',
'X_RPM_PROVIDES' : 'Provides: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
'X_RPM_SERIAL' : 'Serial: %s\n',
'X_RPM_EPOCH' : 'Epoch: %s\n',
'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\n',
'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\n',
'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
'X_RPM_PREFIX' : 'Prefix: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
# internal use
'X_RPM_BUILDROOT' : 'BuildRoot: %s\n', }
# fill in default values:
# Adding a BuildRequires renders the .rpm unbuildable under systems which
# are not managed by rpm, since the database needed to resolve this
# dependency is missing (take Gentoo as an example)
# if not s.has_key('x_rpm_BuildRequires'):
# s['x_rpm_BuildRequires'] = 'scons'
if 'X_RPM_BUILDROOT' not in spec:
spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'
str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
return str
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
""" builds the %file section of the specfile
"""
str = '%files\n'
if 'X_RPM_DEFATTR' not in spec:
spec['X_RPM_DEFATTR'] = '(-,root,root)'
str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
supported_tags = {
'PACKAGING_CONFIG' : '%%config %s',
'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
'PACKAGING_DOC' : '%%doc %s',
'PACKAGING_UNIX_ATTR' : '%%attr %s',
'PACKAGING_LANG_' : '%%lang(%s) %s',
'PACKAGING_X_RPM_VERIFY' : '%%verify %s',
'PACKAGING_X_RPM_DIR' : '%%dir %s',
'PACKAGING_X_RPM_DOCDIR' : '%%docdir %s',
'PACKAGING_X_RPM_GHOST' : '%%ghost %s', }
for file in files:
# build the tagset
tags = {}
for k in supported_tags.keys():
try:
tags[k]=getattr(file, k)
except AttributeError:
pass
# compile the tagset
str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )
str = str + ' '
str = str + file.PACKAGING_INSTALL_LOCATION
str = str + '\n\n'
return str
class SimpleTagCompiler(object):
""" This class is a simple string substition utility:
the replacement specfication is stored in the tagset dictionary, something
like:
{ "abc" : "cdef %s ",
"abc_" : "cdef %s %s" }
the compile function gets a value dictionary, which may look like:
{ "abc" : "ghij",
"abc_gh" : "ij" }
The resulting string will be:
"cdef ghij cdef gh ij"
"""
def __init__(self, tagset, mandatory=1):
self.tagset = tagset
self.mandatory = mandatory
def compile(self, values):
""" compiles the tagset and returns a str containing the result
"""
def is_international(tag):
#return tag.endswith('_')
return tag[-1:] == '_'
def get_country_code(tag):
return tag[-2:]
def strip_country_code(tag):
return tag[:-2]
replacements = list(self.tagset.items())
str = ""
#domestic = [ (k,v) for k,v in replacements if not is_international(k) ]
domestic = [t for t in replacements if not is_international(t[0])]
for key, replacement in domestic:
try:
str = str + replacement % values[key]
except KeyError, e:
if self.mandatory:
raise e
#international = [ (k,v) for k,v in replacements if is_international(k) ]
international = [t for t in replacements if is_international(t[0])]
for key, replacement in international:
try:
#int_values_for_key = [ (get_country_code(k),v) for k,v in values.items() if strip_country_code(k) == key ]
x = [t for t in values.items() if strip_country_code(t[0]) == key]
int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
for v in int_values_for_key:
str = str + replacement % v
except KeyError, e:
if self.mandatory:
raise e
return str
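# Worked example mirroring the docstring above (illustrative only):
#   compiler = SimpleTagCompiler({"abc": "cdef %s ", "abc_": "cdef %s %s"})
#   compiler.compile({"abc": "ghij", "abc_gh": "ij"})  # -> "cdef ghij cdef gh ij"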
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
markYoungH/chromium.src | tools/android/findbugs_plugin/test/run_findbugs_plugin_tests.py | 109 | 1401 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is used to test the findbugs plugin. It calls
# build/android/pylib/utils/findbugs.py to analyze the classes in the
# org.chromium.tools.findbugs.plugin package, and expects to get the same
# issues as those in expected_result.txt.
#
# Useful command line:
# --rebaseline to regenerate expected_result.txt; please make sure you don't
# remove the expected results of existing tests.
import optparse
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..',
'build', 'android')))
from pylib import constants
from pylib.utils import findbugs
def main(argv):
parser = findbugs.GetCommonParser()
options, _ = parser.parse_args()
if not options.known_bugs:
options.known_bugs = os.path.join(constants.DIR_SOURCE_ROOT, 'tools',
'android', 'findbugs_plugin', 'test',
'expected_result.txt')
if not options.only_analyze:
options.only_analyze = 'org.chromium.tools.findbugs.plugin.*'
return findbugs.Run(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
un33k/robotframework | src/robot/utils/encodingsniffer.py | 25 | 3239 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from .platform import JYTHON, WINDOWS, UNIXY
if UNIXY:
DEFAULT_SYSTEM_ENCODING = 'UTF-8'
DEFAULT_OUTPUT_ENCODING = 'UTF-8'
else:
DEFAULT_SYSTEM_ENCODING = 'cp1252'
DEFAULT_OUTPUT_ENCODING = 'cp437'
def get_system_encoding():
platform_getters = [(True, _get_python_system_encoding),
(JYTHON, _get_java_system_encoding),
(UNIXY, _get_unixy_encoding),
(WINDOWS, _get_windows_system_encoding)]
return _get_encoding(platform_getters, DEFAULT_SYSTEM_ENCODING)
def get_output_encoding():
platform_getters = [(True, _get_stream_output_encoding),
(UNIXY, _get_unixy_encoding),
(WINDOWS, _get_windows_output_encoding)]
return _get_encoding(platform_getters, DEFAULT_OUTPUT_ENCODING)
def _get_encoding(platform_getters, default):
for platform, getter in platform_getters:
if platform:
encoding = getter()
if _is_valid(encoding):
return encoding
return default
def _get_python_system_encoding():
return sys.getfilesystemencoding()
def _get_java_system_encoding():
from java.lang import System
return System.getProperty('file.encoding')
def _get_unixy_encoding():
for name in 'LANG', 'LC_CTYPE', 'LANGUAGE', 'LC_ALL':
if name in os.environ:
# Encoding can be in format like `UTF-8` or `en_US.UTF-8`
encoding = os.environ[name].split('.')[-1]
if _is_valid(encoding):
return encoding
return None
def _get_stream_output_encoding():
# Stream may not have encoding attribute if it is intercepted outside RF
# in Python. Encoding is None if process's outputs are redirected.
for stream in sys.__stdout__, sys.__stderr__, sys.__stdin__:
encoding = getattr(stream, 'encoding', None)
if _is_valid(encoding):
return encoding
return None
def _get_windows_system_encoding():
return _get_code_page('GetACP')
def _get_windows_output_encoding():
return _get_code_page('GetOEMCP')
def _get_code_page(method_name):
from ctypes import cdll
try:
method = getattr(cdll.kernel32, method_name)
except TypeError: # Sometimes occurs w/ IronPython (mainly on CI)
return None
method.argtypes = () # Needed w/ Jython (at least 2.5)
return 'cp%s' % method()
def _is_valid(encoding):
if not encoding:
return False
try:
'test'.encode(encoding)
except LookupError:
return False
else:
return True
| apache-2.0 |
phihag/youtube-dl | youtube_dl/extractor/localnews8.py | 87 | 1728 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class LocalNews8IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?localnews8\.com/(?:[^/]+/)*(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.localnews8.com/news/rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings/35183304',
'md5': 'be4d48aea61aa2bde7be2ee47691ad20',
'info_dict': {
'id': '35183304',
'display_id': 'rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings',
'ext': 'mp4',
'title': 'Rexburg business turns carbon fiber scraps into wedding ring',
'description': 'The process was first invented by Lamborghini and less than a dozen companies around the world use it.',
'duration': 153,
'timestamp': 1441844822,
'upload_date': '20150910',
'uploader_id': 'api',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
partner_id = self._search_regex(
r'partnerId\s*[:=]\s*(["\'])(?P<id>\d+)\1',
webpage, 'partner id', group='id')
kaltura_id = self._search_regex(
r'videoIdString\s*[:=]\s*(["\'])kaltura:(?P<id>[0-9a-z_]+)\1',
webpage, 'video id', group='id')
return {
'_type': 'url_transparent',
'url': 'kaltura:%s:%s' % (partner_id, kaltura_id),
'ie_key': 'Kaltura',
'id': video_id,
'display_id': display_id,
}
| unlicense |
WaveBlocks/WaveBlocksND | WaveBlocksND/Plot/plotcm.py | 1 | 1716 | """The WaveBlocks Project
Function for plotting complex matrices with the
phase of the entries encoded into the usual color code.
@author: R. Bourquin
@copyright: Copyright (C) 2011, 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from matplotlib.pyplot import gca
from matplotlib import ticker
from WaveBlocksND.Plot.color_map import color_map
def plotcm(matrix, phase=None, modulus=None, darken=None, axes=None, **kwargs):
"""Plot complex matrices with the phase of the entries encoded into the usual color code.
:param matrix: The matrix data.
:param phase: The phase of the entries, if not given they are computed.
:param modulus: The modulus of the entries, if not given they are computed.
:param darken: Whether to take into account the modulus of the data to darken colors.
:param axes: The axes instance used for plotting.
Note that the additional keyword arguments are passed to the plot function.
"""
# TODO: Allow to specify axes
nr, nc = matrix.shape
extent = [-0.5, nc - 0.5, nr - 0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'}
kw.update(kwargs)
# Plot to the given axis instance or retrieve the current one
if axes is None:
axes = gca()
# Color code and plot the data matrix
axes.imshow(color_map(matrix, phase=phase, modulus=modulus, darken=darken), **kw)
axes.xaxis.tick_top()
axes.xaxis.set_ticks_position('both')
axes.xaxis.set_major_locator(ticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
axes.yaxis.set_major_locator(ticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
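# A minimal usage sketch (assumes numpy and an interactive matplotlib backend):
#   import numpy as np
#   from matplotlib.pyplot import figure, show
#   M = np.exp(2.0j * np.pi * np.random.rand(16, 16))
#   figure()
#   plotcm(M, darken=True)
#   show()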
| bsd-3-clause |
xianai/Ultra-KSM-Head | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Fusion-Devices/android_device_xiaomi_cancro | releasetools.py | 6 | 1443 | #
# Copyright (C) 2015 The CyanogenMod Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Custom OTA commands for cancro devices"""
import common
import os
TARGET_DIR = os.getenv('OUT')
UTILITIES_DIR = os.path.join(TARGET_DIR, 'utilities')
TARGET_DEVICE = os.getenv('CUSTOM_BUILD')
def FullOTA_Assertions(info):
info.output_zip.write(os.path.join(TARGET_DIR, "nfcchecker.sh"), "nfcchecker.sh")
info.output_zip.write(os.path.join(UTILITIES_DIR, "busybox"), "busybox")
info.script.AppendExtra(
('package_extract_file("checksoc.sh", "/tmp/nfcchecker.sh");\n'
'set_metadata("/tmp/nfcchecker.sh", "uid", 0, "gid", 0, "mode", 0777);'))
info.script.AppendExtra(
('package_extract_file("busybox", "/tmp/busybox");\n'
'set_metadata("/tmp/busybox", "uid", 0, "gid", 0, "mode", 0777);'))
def FullOTA_InstallEnd(info):
info.script.AppendExtra('assert(run_program("/tmp/nfcchecker.sh") == 0);')
| gpl-2.0 |
yaoandw/joke | Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/mac/gyptest-rpath.py | 242 | 1310 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'rpath'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def GetRpaths(p):
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'cmd LC_RPATH.*?path (.*?) \(offset \d+\)', re.DOTALL)
proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
assert not proc.returncode
return r.findall(o)
if (GetRpaths('libdefault_rpath.dylib') != []):
test.fail_test()
if (GetRpaths('libexplicit_rpath.dylib') != ['@executable_path/.']):
test.fail_test()
if (GetRpaths('libexplicit_rpaths_escaped.dylib') !=
['First rpath', 'Second rpath']):
test.fail_test()
if (GetRpaths('My Framework.framework/My Framework') != ['@loader_path/.']):
test.fail_test()
if (GetRpaths('executable') != ['@executable_path/.']):
test.fail_test()
test.pass_test()
| mit |
ToontownUprising/src | toontown/effects/BlastEffect.py | 6 | 1346 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from EffectController import EffectController
class BlastEffect(NodePath, EffectController):
def __init__(self):
NodePath.__init__(self, 'BlastEffect')
EffectController.__init__(self)
self.fadeTime = 0.15
self.effectColor = Vec4(1, 1, 1, 1)
model = loader.loadModel('phase_4/models/props/tt_m_efx_ext_particleCards')
self.effectModel = model.find('**/tt_t_efx_ext_particleBlast')
self.effectModel.reparentTo(self)
self.effectModel.setColorScale(0, 0, 0, 0)
self.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne))
self.setBillboardPointWorld()
self.setDepthWrite(0)
self.setLightOff()
self.setFogOff()
def createTrack(self):
self.effectModel.setColorScale(0, 0, 0, 0)
fadeBlast = self.effectModel.colorScaleInterval(self.fadeTime, Vec4(0, 0, 0, 0), startColorScale=Vec4(self.effectColor), blendType='easeOut')
scaleBlast = self.effectModel.scaleInterval(self.fadeTime, 4, startScale=1.0, blendType='easeIn')
self.track = Sequence(Parallel(fadeBlast, scaleBlast), Func(self.cleanUpEffect))
def setEffectColor(self, color):
self.effectColor = color
| mit |
wyc/django | django/contrib/postgres/aggregates/statistics.py | 493 | 2033 | from django.db.models import FloatField, IntegerField
from django.db.models.aggregates import Aggregate
__all__ = [
'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept',
'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate',
]
class StatAggregate(Aggregate):
def __init__(self, y, x, output_field=FloatField()):
if not x or not y:
raise ValueError('Both y and x must be provided.')
super(StatAggregate, self).__init__(y=y, x=x, output_field=output_field)
self.x = x
self.y = y
self.source_expressions = self._parse_expressions(self.y, self.x)
def get_source_expressions(self):
return self.y, self.x
def set_source_expressions(self, exprs):
self.y, self.x = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
class Corr(StatAggregate):
function = 'CORR'
class CovarPop(StatAggregate):
def __init__(self, y, x, sample=False):
self.function = 'COVAR_SAMP' if sample else 'COVAR_POP'
super(CovarPop, self).__init__(y, x)
class RegrAvgX(StatAggregate):
function = 'REGR_AVGX'
class RegrAvgY(StatAggregate):
function = 'REGR_AVGY'
class RegrCount(StatAggregate):
function = 'REGR_COUNT'
def __init__(self, y, x):
super(RegrCount, self).__init__(y=y, x=x, output_field=IntegerField())
def convert_value(self, value, expression, connection, context):
if value is None:
return 0
return int(value)
class RegrIntercept(StatAggregate):
function = 'REGR_INTERCEPT'
class RegrR2(StatAggregate):
function = 'REGR_R2'
class RegrSlope(StatAggregate):
function = 'REGR_SLOPE'
class RegrSXX(StatAggregate):
function = 'REGR_SXX'
class RegrSXY(StatAggregate):
function = 'REGR_SXY'
class RegrSYY(StatAggregate):
function = 'REGR_SYY'
| bsd-3-clause |
quiltdata/quilt | lambdas/pkgevents/index.py | 1 | 2667 | import itertools
import json
import re
import boto3
from t4_lambda_shared.utils import get_quilt_logger
EXPECTED_POINTER_SIZE = 64
event_bridge = boto3.client('events')
s3 = boto3.client('s3')
logger = get_quilt_logger()
class PutEventsException(Exception):
pass
class EventsQueue:
MAX_SIZE = 10
def __init__(self):
self._events = []
def append(self, event):
self._events.append(event)
if len(self) >= self.MAX_SIZE:
self._flush()
def _flush(self):
events = self._events
self._events = []
resp = event_bridge.put_events(Entries=events)
if resp['FailedEntryCount']:
raise PutEventsException(resp)
def flush(self):
if self:
self._flush()
def __len__(self):
return len(self._events)
def __bool__(self):
return bool(self._events)
PKG_POINTER_REGEX = re.compile(r'\.quilt/named_packages/([\w-]+/[\w-]+)/([0-9]{10})')
def pkg_created_event(s3_event):
if not s3_event['eventName'].startswith('ObjectCreated:'):
return
s3_event_obj = s3_event['s3']
obj = s3_event_obj['object']
key = obj['key']
match = PKG_POINTER_REGEX.fullmatch(key)
if not match:
return
pkg_name, pointer_name = match.groups()
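# Pointer names are Unix timestamps; the range below roughly spans
# 2016-01-01 through 2026-01-01 and filters out obviously bogus pointers.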
if not '1451631600' <= pointer_name <= '1767250800':
return
bucket_obj = s3_event_obj['bucket']
bucket = bucket_obj['name']
try:
resp = s3.get_object(Bucket=bucket, Key=key, Range=f'bytes=0-{EXPECTED_POINTER_SIZE - 1}')
except s3.exceptions.NoSuchKey:
logger.warning('pointer is created in bucket %r at %r, but not found', bucket, key)
return
if resp['ContentLength'] != EXPECTED_POINTER_SIZE:
logger.warning('pointer in bucket %r at %r has %d bytes, but %d bytes expected',
bucket, key, resp['ContentLength'], EXPECTED_POINTER_SIZE)
return
return {
'Time': s3_event['eventTime'],
'Source': 'com.quiltdata',
'DetailType': 'package-revision',
'Resources': [
# TODO: add stack ARN?
],
'Detail': json.dumps({
'version': '0.1',
'type': 'created',
'bucket': bucket,
'handle': pkg_name,
'topHash': resp['Body'].read().decode(),
}),
}
def handler(event, context):
s3_events = itertools.chain.from_iterable(
json.loads(record['body'])['Records']
for record in event['Records']
)
queue = EventsQueue()
for event in filter(None, map(pkg_created_event, s3_events)):
queue.append(event)
queue.flush()
| apache-2.0 |