Dataset columns:

  code        string   lengths 2 to 1.05M
  repo_name   string   lengths 5 to 104
  path        string   lengths 4 to 251
  language    string   1 class
  license     string   15 classes
  size        int32    2 to 1.05M
# Copyright (c) 2006-2007 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Ron Dreslinski import optparse import sys import m5 from m5.objects import * parser = optparse.OptionParser() parser.add_option("-a", "--atomic", action="store_true", help="Use atomic (non-timing) mode") parser.add_option("-b", "--blocking", action="store_true", help="Use blocking caches") parser.add_option("-l", "--maxloads", metavar="N", default=0, help="Stop after N loads") parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick, metavar="T", help="Stop after T ticks") # # The "tree" specification is a colon-separated list of one or more # integers. The first integer is the number of caches/testers # connected directly to main memory. The last integer in the list is # the number of testers associated with the uppermost level of memory # (L1 cache, if there are caches, or main memory if no caches). Thus # if there is only one integer, there are no caches, and the integer # specifies the number of testers connected directly to main memory. # The other integers (if any) specify the number of caches at each # level of the hierarchy between. 
# # Examples: # # "2:1" Two caches connected to memory with a single tester behind each # (single-level hierarchy, two testers total) # # "2:2:1" Two-level hierarchy, 2 L1s behind each of 2 L2s, 4 testers total # parser.add_option("-t", "--treespec", type="string", default="8:1", help="Colon-separated multilevel tree specification, " "see script comments for details " "[default: %default]") parser.add_option("--force-bus", action="store_true", help="Use bus between levels even with single cache") parser.add_option("-f", "--functional", type="int", default=0, metavar="PCT", help="Target percentage of functional accesses " "[default: %default]") parser.add_option("-u", "--uncacheable", type="int", default=0, metavar="PCT", help="Target percentage of uncacheable accesses " "[default: %default]") parser.add_option("--progress", type="int", default=1000, metavar="NLOADS", help="Progress message interval " "[default: %default]") (options, args) = parser.parse_args() if args: print "Error: script doesn't take any positional arguments" sys.exit(1) block_size = 64 try: treespec = [int(x) for x in options.treespec.split(':')] numtesters = reduce(lambda x,y: x*y, treespec) except: print "Error parsing treespec option" sys.exit(1) if numtesters > block_size: print "Error: Number of testers limited to %s because of false sharing" \ % (block_size) sys.exit(1) if len(treespec) < 1: print "Error parsing treespec" sys.exit(1) # define prototype L1 cache proto_l1 = BaseCache(size = '32kB', assoc = 4, block_size = block_size, latency = '1ns', tgts_per_mshr = 8) if options.blocking: proto_l1.mshrs = 1 else: proto_l1.mshrs = 4 # build a list of prototypes, one for each level of treespec, starting # at the end (last entry is tester objects) prototypes = [ MemTest(atomic=options.atomic, max_loads=options.maxloads, percent_functional=options.functional, percent_uncacheable=options.uncacheable, progress_interval=options.progress) ] # next comes L1 cache, if any if len(treespec) > 1: prototypes.insert(0, proto_l1) # now add additional cache levels (if any) by scaling L1 params for scale in treespec[:-2]: # clone previous level and update params prev = prototypes[0] next = prev() next.size = prev.size * scale next.latency = prev.latency * 10 next.assoc = prev.assoc * scale next.mshrs = prev.mshrs * scale prototypes.insert(0, next) # system simulated system = System(funcmem = SimpleMemory(in_addr_map = False), funcbus = NoncoherentBus(), physmem = SimpleMemory(latency = "100ns")) def make_level(spec, prototypes, attach_obj, attach_port): fanout = spec[0] parent = attach_obj # use attach obj as config parent too if len(spec) > 1 and (fanout > 1 or options.force_bus): port = getattr(attach_obj, attach_port) new_bus = CoherentBus(clock="500MHz", width=16) if (port.role == 'MASTER'): new_bus.slave = port attach_port = "master" else: new_bus.master = port attach_port = "slave" parent.cpu_side_bus = new_bus attach_obj = new_bus objs = [prototypes[0]() for i in xrange(fanout)] if len(spec) > 1: # we just built caches, more levels to go parent.cache = objs for cache in objs: cache.mem_side = getattr(attach_obj, attach_port) make_level(spec[1:], prototypes[1:], cache, "cpu_side") else: # we just built the MemTest objects parent.cpu = objs for t in objs: t.test = getattr(attach_obj, attach_port) t.functional = system.funcbus.slave make_level(treespec, prototypes, system.physmem, "port") # connect reference memory to funcbus system.funcbus.master = system.funcmem.port # ----------------------- # run simulation # 
----------------------- root = Root( full_system = False, system = system ) if options.atomic: root.system.mem_mode = 'atomic' else: root.system.mem_mode = 'timing' # The system port is never used in the tester so merely connect it # to avoid problems root.system.system_port = root.system.physmem.port # Not much point in this being higher than the L1 latency m5.ticks.setGlobalFrequency('1ns') # instantiate configuration m5.instantiate() # simulate until program terminates exit_event = m5.simulate(options.maxtick) print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
aferr/LatticeMemCtl
configs/example/memtest.py
Python
bsd-3-clause
7,651
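The memtest.py configuration above builds its cache/tester hierarchy from a colon-separated "treespec" string, multiplying the per-level fan-outs to get the total tester count. The following standalone sketch (plain Python, not gem5 code) shows how such a spec can be parsed and how the totals in the script's comments come out; it mirrors the reduce-based product in the script but is only an illustration.

# Standalone sketch: parse a colon-separated treespec such as "2:2:1" and
# compute the total number of testers, mirroring memtest.py's
# reduce(lambda x, y: x * y, treespec). memtest.py additionally caps the
# total at the 64-byte block size to avoid false sharing.
from functools import reduce

def parse_treespec(spec):
    """Return (levels, total_testers) for a spec such as '2:2:1'."""
    levels = [int(x) for x in spec.split(':')]
    if not levels:
        raise ValueError("empty treespec")
    total_testers = reduce(lambda x, y: x * y, levels)
    return levels, total_testers

if __name__ == '__main__':
    for spec in ("8:1", "2:1", "2:2:1"):
        levels, testers = parse_treespec(spec)
        print(spec, "->", levels, "testers:", testers)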
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Entry-type module for the 16-bit x86 reset code for U-Boot
#

from binman.entry import Entry
from binman.etype.blob import Entry_blob

class Entry_x86_reset16(Entry_blob):
    """x86 16-bit reset code for U-Boot

    Properties / Entry arguments:
        - filename: Filename of u-boot-x86-reset16.bin
            (default 'u-boot-x86-reset16.bin')

    x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
    must be placed at a particular address. This entry holds that code. It is
    typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
    for jumping to the x86-start16 code, which continues execution.

    For 64-bit U-Boot, the 'x86_reset16_spl' entry type is used instead.
    """
    def __init__(self, section, etype, node):
        super().__init__(section, etype, node)

    def GetDefaultFilename(self):
        return 'u-boot-x86-reset16.bin'
Digilent/u-boot-digilent
tools/binman/etype/x86_reset16.py
Python
gpl-2.0
1,018
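The x86_reset16 entry type above is essentially a blob subclass that only overrides its default payload filename. The sketch below shows that subclass pattern in a self-contained form; EntryBlob is a stand-in base class so the example runs outside the U-Boot tree and is not binman's real API.

# Self-contained sketch of the "blob entry with a fixed default filename"
# pattern. EntryBlob is a stand-in for binman.etype.blob.Entry_blob
# (assumption for illustration only).
class EntryBlob:
    def __init__(self, section, etype, node):
        self.section = section
        self.etype = etype
        self.node = node

    def GetDefaultFilename(self):
        raise NotImplementedError

class EntryX86Reset16(EntryBlob):
    """Mirrors Entry_x86_reset16: the payload filename is fixed by the type."""
    def GetDefaultFilename(self):
        return 'u-boot-x86-reset16.bin'

if __name__ == '__main__':
    entry = EntryX86Reset16(section=None, etype='x86-reset16', node=None)
    print(entry.GetDefaultFilename())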
# -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

import matplotlib
# matplotlib.use("macosx")
import pylab

import nest
import nest.voltage_trace

weight = 20.0
delay = 1.0
stim = 1000.0

neuron1 = nest.Create("iaf_neuron")
neuron2 = nest.Create("iaf_neuron")
voltmeter = nest.Create("voltmeter")

nest.SetStatus(neuron1, {"I_e": stim})

nest.Connect(neuron1, neuron2, syn_spec={'weight': weight, 'delay': delay})
nest.Connect(voltmeter, neuron2)

nest.Simulate(100.0)

nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show()
kristoforcarlson/nest-simulator-fork
pynest/examples/twoneurons.py
Python
gpl-2.0
1,209
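The NEST example above drives one integrate-and-fire neuron with a constant current and lets it excite a second one. For readers unfamiliar with the model, the following rough, self-contained sketch shows the leaky integrate-and-fire dynamics behind an "iaf" neuron using forward-Euler integration; all parameter values are illustrative and are not NEST's defaults.

# Rough forward-Euler sketch of a leaky integrate-and-fire neuron driven by a
# constant current. Units: time in ms, voltage in mV, current in pA,
# capacitance in pF; parameter values are made up for illustration.
def simulate_iaf(i_e=1000.0, t_sim=100.0, dt=0.1,
                 tau_m=10.0, c_m=250.0, e_l=-70.0, v_th=-55.0, v_reset=-70.0):
    """Return the spike times (ms) produced by a constant input current i_e."""
    r_m = tau_m / c_m              # membrane resistance (GOhm)
    v = e_l
    spikes = []
    for step in range(int(t_sim / dt)):
        # dV/dt = (-(V - E_L) + R*I) / tau_m, integrated with step dt
        v += dt / tau_m * (-(v - e_l) + r_m * i_e)
        if v >= v_th:
            spikes.append(step * dt)
            v = v_reset
    return spikes

if __name__ == '__main__':
    print("spike times (ms):", [round(t, 1) for t in simulate_iaf()])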
# -*- coding: utf-8 -*-

'''
    Specto Add-on
    Copyright (C) 2015 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import re, json, urllib

from resources.lib.libraries import client


def resolve(url):
    try:
        try:
            v = url.split('public')[-1]

            r = client.request(url)
            r = re.sub(r'[^\x00-\x7F]+', ' ', r)

            tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]
            url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]

            url = '%s%s?key=%s' % (url, v, tok)
            print("u", url)

            return url
        except:
            return
    except:
        return
azumimuo/family-xbmc-addon
plugin.video.specto/resources/lib/resolvers/cloudmailru.py
Python
gpl-2.0
1,296
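The resolver above works by fetching the public share page and pulling a download token and a base URL out of it with regular expressions. The sketch below isolates that scraping step so it can be run without Kodi or the client library; the sample page text is made up and the real cloud.mail.ru markup may differ.

# Self-contained sketch of the token/URL scraping step. SAMPLE_PAGE is an
# invented stand-in for the fetched share page.
import re

SAMPLE_PAGE = ('{"tokens": {"download": "abc123"}, '
               '"weblink_get": [{"count": 1, "url": "https://cloclo.example/weblink"}]}')

def resolve_public_link(page, public_suffix):
    """Join the weblink base URL, the public path suffix, and the download token."""
    tok = re.findall(r'"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', page)[0]
    base = re.findall(r'"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', page)[0]
    return '%s%s?key=%s' % (base, public_suffix, tok)

if __name__ == '__main__':
    # public_suffix corresponds to url.split('public')[-1] in the resolver above
    print(resolve_public_link(SAMPLE_PAGE, '/folder/file.mp4'))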
import StringIO


class Plugin(object):
    ANGULAR_MODULE = None

    JS_FILES = []
    CSS_FILES = []

    @classmethod
    def PlugIntoApp(cls, app):
        pass

    @classmethod
    def GenerateHTML(cls, root_url="/"):
        out = StringIO.StringIO()

        for js_file in cls.JS_FILES:
            js_file = js_file.lstrip("/")
            out.write('<script src="%s%s"></script>\n' % (root_url, js_file))

        for css_file in cls.CSS_FILES:
            css_file = css_file.lstrip("/")
            out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
                root_url, css_file))

        if cls.ANGULAR_MODULE:
            out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)

        return out.getvalue()
dsweet04/rekall
rekall-gui/manuskript/plugin.py
Python
gpl-2.0
836
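A plugin in the manuskript module above declares its JS/CSS assets and an Angular module as class attributes, and GenerateHTML turns them into script/link tags. The Python 3 re-sketch below shows how a concrete plugin would use that mechanism; the TimelinePlugin class and its file paths are invented for illustration.

# Python 3 re-sketch of the plugin pattern above plus a usage example.
import io

class Plugin:
    ANGULAR_MODULE = None
    JS_FILES = []
    CSS_FILES = []

    @classmethod
    def GenerateHTML(cls, root_url="/"):
        out = io.StringIO()
        for js_file in cls.JS_FILES:
            out.write('<script src="%s%s"></script>\n'
                      % (root_url, js_file.lstrip("/")))
        for css_file in cls.CSS_FILES:
            out.write('<link rel="stylesheet" href="%s%s"></link>\n'
                      % (root_url, css_file.lstrip("/")))
        if cls.ANGULAR_MODULE:
            out.write('<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n'
                      'manuskriptPluginsList.push("%s");</script>\n' % cls.ANGULAR_MODULE)
        return out.getvalue()

class TimelinePlugin(Plugin):
    # Hypothetical plugin: names and paths are illustrative only.
    ANGULAR_MODULE = "manuskript.timeline"
    JS_FILES = ["/static/js/timeline.js"]
    CSS_FILES = ["/static/css/timeline.css"]

if __name__ == '__main__':
    print(TimelinePlugin.GenerateHTML(root_url="/app/"))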
# # Copyright (C) 2016 FreeIPA Contributors see COPYING for license # # pylint: disable=unused-import import six from . import Command, Method, Object from ipalib import api, parameters, output from ipalib.parameters import DefaultFrom from ipalib.plugable import Registry from ipalib.text import _ from ipapython.dn import DN from ipapython.dnsutil import DNSName if six.PY3: unicode = str __doc__ = _(""" Set a user's password If someone other than a user changes that user's password (e.g., Helpdesk resets it) then the password will need to be changed the first time it is used. This is so the end-user is the only one who knows the password. The IPA password policy controls how often a password may be changed, what strength requirements exist, and the length of the password history. EXAMPLES: To reset your own password: ipa passwd To change another user's password: ipa passwd tuser1 """) register = Registry() @register() class passwd(Command): __doc__ = _("Set a user's password.") takes_args = ( parameters.Str( 'principal', cli_name='user', label=_(u'User name'), default_from=DefaultFrom(lambda : None), # FIXME: # lambda: krb_utils.get_principal() autofill=True, no_convert=True, ), parameters.Password( 'password', label=_(u'New Password'), confirm=True, ), parameters.Password( 'current_password', label=_(u'Current Password'), default_from=DefaultFrom(lambda principal: None, 'principal'), # FIXME: # lambda principal: get_current_password(principal) autofill=True, ), ) takes_options = ( parameters.Password( 'otp', required=False, label=_(u'OTP'), doc=_(u'One Time Password'), ), ) has_output = ( output.Output( 'summary', (unicode, type(None)), doc=_(u'User-friendly description of action performed'), ), output.Output( 'result', bool, doc=_(u'True means the operation was successful'), ), output.PrimaryKey( 'value', doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"), ), )
redhatrises/freeipa
ipaclient/remote_plugins/2_164/passwd.py
Python
gpl-3.0
2,428
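The remote passwd plugin above is purely declarative: the command lists its positional arguments, options, and output schema as parameter descriptors, and the framework handles prompting and validation. The sketch below mimics that shape with a tiny stand-in driver; it is not ipalib's real Command/parameters API, just an illustration of the declarative pattern.

# Self-contained sketch of a declarative command: parameters are data, and a
# small driver validates input against them. Class and field names are
# illustrative, not ipalib's.
class Param:
    def __init__(self, name, required=True, confirm=False):
        self.name = name
        self.required = required
        self.confirm = confirm

class PasswdCommand:
    takes_args = (Param('principal'), Param('password', confirm=True))
    takes_options = (Param('otp', required=False),)

    def validate(self, **kwargs):
        for p in self.takes_args + self.takes_options:
            if p.required and p.name not in kwargs:
                raise ValueError('missing required parameter: %s' % p.name)
        return {'summary': 'validated', 'result': True,
                'value': kwargs.get('principal')}

if __name__ == '__main__':
    cmd = PasswdCommand()
    print(cmd.validate(principal='tuser1', password='Secret123'))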
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
import shutil

from pants.util.contextutil import temporary_file


def atomic_copy(src, dst):
  """Copy the file src to dst, overwriting dst atomically."""
  with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
    shutil.copyfile(src, tmp_dst.name)
    os.rename(tmp_dst.name, dst)


def create_size_estimators():
  def line_count(filename):
    with open(filename, 'rb') as fh:
      return sum(1 for line in fh)
  return {
    'linecount': lambda srcs: sum(line_count(src) for src in srcs),
    'filecount': lambda srcs: len(srcs),
    'filesize': lambda srcs: sum(os.path.getsize(src) for src in srcs),
    'nosize': lambda srcs: 0,
  }
sameerparekh/pants
src/python/pants/util/fileutil.py
Python
apache-2.0
961
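The atomic_copy helper above relies on writing into a temporary file in the destination directory and renaming it over the target, so readers never see a half-written file. The following Python 3 sketch shows the same idea using only the standard library (it is not pants' implementation); os.replace is atomic when the temporary file and the destination live on the same filesystem, which is exactly why the temp file is created next to the target.

# Standard-library sketch of the atomic-copy idea used above.
import os
import shutil
import tempfile

def atomic_copy(src, dst):
    """Copy src to dst, replacing dst atomically on POSIX and Windows."""
    dst_dir = os.path.dirname(os.path.abspath(dst))
    fd, tmp_path = tempfile.mkstemp(dir=dst_dir)
    try:
        os.close(fd)
        shutil.copyfile(src, tmp_path)
        os.replace(tmp_path, dst)   # atomic rename over the destination
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)     # clean up only if the replace never happened

if __name__ == '__main__':
    with open('source.txt', 'w') as f:
        f.write('hello\n')
    atomic_copy('source.txt', 'dest.txt')
    print(open('dest.txt').read())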
"""Support for the MAX! Cube LAN Gateway.""" import logging from socket import timeout from threading import Lock import time from maxcube.cube import MaxCube import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SCAN_INTERVAL import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import load_platform from homeassistant.util.dt import now _LOGGER = logging.getLogger(__name__) DEFAULT_PORT = 62910 DOMAIN = "maxcube" DATA_KEY = "maxcube" NOTIFICATION_ID = "maxcube_notification" NOTIFICATION_TITLE = "Max!Cube gateway setup" CONF_GATEWAYS = "gateways" CONFIG_GATEWAY = vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_SCAN_INTERVAL, default=300): cv.time_period, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_GATEWAYS, default={}): vol.All( cv.ensure_list, [CONFIG_GATEWAY] ) } ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Establish connection to MAX! Cube.""" if DATA_KEY not in hass.data: hass.data[DATA_KEY] = {} connection_failed = 0 gateways = config[DOMAIN][CONF_GATEWAYS] for gateway in gateways: host = gateway[CONF_HOST] port = gateway[CONF_PORT] scan_interval = gateway[CONF_SCAN_INTERVAL].total_seconds() try: cube = MaxCube(host, port, now=now) hass.data[DATA_KEY][host] = MaxCubeHandle(cube, scan_interval) except timeout as ex: _LOGGER.error("Unable to connect to Max!Cube gateway: %s", str(ex)) hass.components.persistent_notification.create( f"Error: {ex}<br />You will need to restart Home Assistant after fixing.", title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID, ) connection_failed += 1 if connection_failed >= len(gateways): return False load_platform(hass, "climate", DOMAIN, {}, config) load_platform(hass, "binary_sensor", DOMAIN, {}, config) return True class MaxCubeHandle: """Keep the cube instance in one place and centralize the update.""" def __init__(self, cube, scan_interval): """Initialize the Cube Handle.""" self.cube = cube self.cube.use_persistent_connection = scan_interval <= 300 # seconds self.scan_interval = scan_interval self.mutex = Lock() self._updatets = time.monotonic() def update(self): """Pull the latest data from the MAX! Cube.""" # Acquire mutex to prevent simultaneous update from multiple threads with self.mutex: # Only update every update_interval if (time.monotonic() - self._updatets) >= self.scan_interval: _LOGGER.debug("Updating") try: self.cube.update() except timeout: _LOGGER.error("Max!Cube connection failed") return False self._updatets = time.monotonic() else: _LOGGER.debug("Skipping update")
jawilson/home-assistant
homeassistant/components/maxcube/__init__.py
Python
apache-2.0
3,289
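MaxCubeHandle above serializes access to the cube with a lock and only polls the gateway once per scan interval, no matter how many entities ask for an update. The sketch below isolates that throttling pattern in a self-contained form; FakeCube is a stand-in for the real maxcube library, and the interval values are arbitrary.

# Self-contained sketch of the lock-plus-interval throttling used by
# MaxCubeHandle. FakeCube stands in for the real device library.
import time
from threading import Lock

class FakeCube:
    def __init__(self):
        self.polls = 0
    def update(self):
        self.polls += 1

class ThrottledHandle:
    def __init__(self, cube, scan_interval):
        self.cube = cube
        self.scan_interval = scan_interval
        self._lock = Lock()
        self._last_update = time.monotonic()

    def update(self):
        # The lock prevents concurrent callers from polling simultaneously;
        # the timestamp check skips polls that arrive too soon.
        with self._lock:
            if (time.monotonic() - self._last_update) >= self.scan_interval:
                self.cube.update()
                self._last_update = time.monotonic()

if __name__ == '__main__':
    handle = ThrottledHandle(FakeCube(), scan_interval=0.25)
    for _ in range(6):
        handle.update()
        time.sleep(0.1)
    print("device polled %d time(s) for 6 update requests" % handle.cube.polls)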
# # Copyright 2015 Cisco Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/kafka_broker.py """ import datetime import uuid import mock from oslo_utils import netutils from ceilometer.event.storage import models as event from ceilometer.publisher import kafka_broker as kafka from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base @mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) @mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', mock.Mock()) class TestKafkaPublisher(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_publish(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_without_options(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_without_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) self.assertEqual('default', publisher.policy) publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) 
self.assertEqual('default', publisher.policy) def test_publish_to_host_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = TypeError self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_samples, mock.MagicMock(), self.test_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_drop_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_queue_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(1, len(publisher.local_queue)) def test_publish_to_down_host_with_default_queue_size(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 2000): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual('test-976', publisher.local_queue[0][2][0]['counter_name']) self.assertEqual('test-1999', publisher.local_queue[1023][2][0]['counter_name']) def test_publish_to_host_from_down_to_up_with_queue(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 16): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(16, len(publisher.local_queue)) fake_producer.send_messages.side_effect = None for s in self.test_data: s.name = 'test-%d' % 16 publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) def test_publish_event_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_events(mock.MagicMock(), self.test_event_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_events, mock.MagicMock(), self.test_event_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue))
cernops/ceilometer
ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py
Python
apache-2.0
8,865
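The tests above exercise the publisher's delivery policies: "default" retries and eventually raises, "drop" discards failed batches, and "queue" keeps them in a bounded local queue (1024 entries by default) for later flushing. The sketch below illustrates the queue-policy behavior in isolation; it is not ceilometer's implementation, just a minimal model of the semantics the tests assert.

# Minimal model of a "queue" delivery policy: failed batches are retained in a
# bounded queue and retried on the next publish; the oldest entries fall off
# once the limit is reached.
from collections import deque

class QueuePolicyPublisher:
    def __init__(self, send, max_queue=1024):
        self._send = send                       # callable that raises when the broker is down
        self.local_queue = deque(maxlen=max_queue)

    def publish(self, batch):
        self.local_queue.append(batch)
        remaining = deque(maxlen=self.local_queue.maxlen)
        while self.local_queue:
            pending = self.local_queue.popleft()
            try:
                self._send(pending)
            except Exception:
                remaining.append(pending)       # keep the failed batch ...
                remaining.extend(self.local_queue)  # ... and everything still queued
                break
        self.local_queue = remaining

if __name__ == '__main__':
    broker_up = {'ok': False}
    def send(batch):
        if not broker_up['ok']:
            raise RuntimeError("broker down")
    pub = QueuePolicyPublisher(send)
    pub.publish(['s1'])
    pub.publish(['s2'])
    print(len(pub.local_queue))   # 2 batches retained while the broker is down
    broker_up['ok'] = True
    pub.publish(['s3'])
    print(len(pub.local_queue))   # 0 after a successful flush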
#!/usr/bin/python2.4

# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Collada DOM 1.3.0 tool for SCons."""


def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool."""
  env.Append(CCFLAGS=[
      '-I$COLLADA_DIR/include',
      '-I$COLLADA_DIR/include/1.4',
  ])
nguyentran/openviber
tools/swtoolkit/site_scons/site_tools/collada_dom.py
Python
mit
1,817
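The collada_dom tool above follows the standard SCons tool shape: a module that exposes generate(env) (and usually exists(env)) and mutates the construction environment. The self-contained sketch below shows that shape with a fake environment so the Append semantics are visible without installing SCons; FakeEnvironment is only a stand-in for a real SCons Environment.

# Self-contained sketch of the SCons tool pattern. FakeEnvironment stands in
# for scons.Environment and only implements the Append semantics used here.
class FakeEnvironment:
    def __init__(self):
        self.vars = {}
    def Append(self, **kwargs):
        for key, value in kwargs.items():
            self.vars.setdefault(key, []).extend(value)

def generate(env):
    """Tool entry point: add Collada DOM include paths (flags are illustrative)."""
    env.Append(CCFLAGS=[
        '-I$COLLADA_DIR/include',
        '-I$COLLADA_DIR/include/1.4',
    ])

def exists(env):
    return True

if __name__ == '__main__':
    env = FakeEnvironment()
    generate(env)
    print(env.vars['CCFLAGS'])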
""" Utility function to facilitate testing. """ from __future__ import division, absolute_import, print_function import os import sys import re import operator import warnings from functools import partial import shutil import contextlib from tempfile import mkdtemp, mkstemp from .nosetester import import_nose from numpy.core import float32, empty, arange, array_repr, ndarray from numpy.lib.utils import deprecate if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO __all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir'] class KnownFailureException(Exception): '''Raise this exception to mark a test as a known failing test.''' pass KnownFailureTest = KnownFailureException # backwards compat # nose.SkipTest is unittest.case.SkipTest # import it into the namespace, so that it's available as np.testing.SkipTest try: from unittest.case import SkipTest except ImportError: # on py2.6 unittest.case is not available. Ask nose for a replacement. SkipTest = import_nose().SkipTest verbose = 0 def assert_(val, msg=''): """ Assert that works in release mode. Accepts callable msg to allow deferring evaluation until failure. The Python built-in ``assert`` does not work when executing code in optimized mode (the ``-O`` flag) - no byte-code is generated for it. For documentation on usage, refer to the Python documentation. """ if not val: try: smsg = msg() except TypeError: smsg = msg raise AssertionError(smsg) def gisnan(x): """like isnan, but always raise an error if type not supported instead of returning a TypeError object. Notes ----- isnan and other ufunc sometimes return a NotImplementedType object instead of raising any exception. This function is a wrapper to make sure an exception is always raised. This should be removed once this problem is solved at the Ufunc level.""" from numpy.core import isnan st = isnan(x) if isinstance(st, type(NotImplemented)): raise TypeError("isnan not supported for this type") return st def gisfinite(x): """like isfinite, but always raise an error if type not supported instead of returning a TypeError object. Notes ----- isfinite and other ufunc sometimes return a NotImplementedType object instead of raising any exception. This function is a wrapper to make sure an exception is always raised. This should be removed once this problem is solved at the Ufunc level.""" from numpy.core import isfinite, errstate with errstate(invalid='ignore'): st = isfinite(x) if isinstance(st, type(NotImplemented)): raise TypeError("isfinite not supported for this type") return st def gisinf(x): """like isinf, but always raise an error if type not supported instead of returning a TypeError object. Notes ----- isinf and other ufunc sometimes return a NotImplementedType object instead of raising any exception. This function is a wrapper to make sure an exception is always raised. 
This should be removed once this problem is solved at the Ufunc level.""" from numpy.core import isinf, errstate with errstate(invalid='ignore'): st = isinf(x) if isinstance(st, type(NotImplemented)): raise TypeError("isinf not supported for this type") return st @deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " "Use numpy.random.rand instead.") def rand(*args): """Returns an array of random numbers with the given shape. This only uses the standard library, so it is useful for testing purposes. """ import random from numpy.core import zeros, float64 results = zeros(args, float64) f = results.flat for i in range(len(f)): f[i] = random.random() return results if os.name == 'nt': # Code "stolen" from enthought/debug/memusage.py def GetPerformanceAttributes(object, counter, instance=None, inum=-1, format=None, machine=None): # NOTE: Many counters require 2 samples to give accurate results, # including "% Processor Time" (as by definition, at any instant, a # thread's CPU usage is either 0 or 100). To read counters like this, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp # My older explanation for this was that the "AddCounter" process forced # the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) try: win32pdh.CollectQueryData(hq) type, val = win32pdh.GetFormattedCounterValue(hc, format) return val finally: win32pdh.RemoveCounter(hc) finally: win32pdh.CloseQuery(hq) def memusage(processName="python", instance=0): # from win32pdhutil, part of the win32all package import win32pdh return GetPerformanceAttributes("Process", "Virtual Bytes", processName, instance, win32pdh.PDH_FMT_LONG, None) elif sys.platform[:5] == 'linux': def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): """ Return virtual memory size in bytes of the running python. """ try: f = open(_proc_pid_stat, 'r') l = f.readline().split(' ') f.close() return int(l[22]) except: return else: def memusage(): """ Return memory usage of running python. [Not implemented] """ raise NotImplementedError if sys.platform[:5] == 'linux': def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), _load_time=[]): """ Return number of jiffies elapsed. Return number of jiffies (1/100ths of a second) that this process has been scheduled in user mode. See man 5 proc. """ import time if not _load_time: _load_time.append(time.time()) try: f = open(_proc_pid_stat, 'r') l = f.readline().split(' ') f.close() return int(l[13]) except: return int(100*(time.time()-_load_time[0])) else: # os.getpid is not in all platforms available. # Using time is safe but inaccurate, especially when process # was suspended or sleeping. def jiffies(_load_time=[]): """ Return number of jiffies elapsed. Return number of jiffies (1/100ths of a second) that this process has been scheduled in user mode. See man 5 proc. 
""" import time if not _load_time: _load_time.append(time.time()) return int(100*(time.time()-_load_time[0])) def build_err_msg(arrays, err_msg, header='Items are not equal:', verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): msg = ['\n' + header] if err_msg: if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): msg = [msg[0] + ' ' + err_msg] else: msg.append(err_msg) if verbose: for i, a in enumerate(arrays): if isinstance(a, ndarray): # precision argument is only needed if the objects are ndarrays r_func = partial(array_repr, precision=precision) else: r_func = repr try: r = r_func(a) except: r = '[repr failed]' if r.count('\n') > 3: r = '\n'.join(r.splitlines()[:3]) r += '...' msg.append(' %s: %s' % (names[i], r)) return '\n'.join(msg) def assert_equal(actual,desired,err_msg='',verbose=True): """ Raises an AssertionError if two objects are not equal. Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), check that all elements of these objects are equal. An exception is raised at the first conflicting values. Parameters ---------- actual : array_like The object to check. desired : array_like The expected object. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal. Examples -------- >>> np.testing.assert_equal([4,5], [4,6]) ... <type 'exceptions.AssertionError'>: Items are not equal: item=1 ACTUAL: 5 DESIRED: 6 """ __tracebackhide__ = True # Hide traceback for py.test if isinstance(desired, dict): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) for k in range(len(desired)): assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) return from numpy.core import ndarray, isscalar, signbit from numpy.lib import iscomplexobj, real, imag if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_equal(actual, desired, err_msg, verbose) msg = build_err_msg([actual, desired], err_msg, verbose=verbose) # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly # XXX: catch ValueError for subclasses of ndarray where iscomplex fail try: usecomplex = iscomplexobj(actual) or iscomplexobj(desired) except ValueError: usecomplex = False if usecomplex: if iscomplexobj(actual): actualr = real(actual) actuali = imag(actual) else: actualr = actual actuali = 0 if iscomplexobj(desired): desiredr = real(desired) desiredi = imag(desired) else: desiredr = desired desiredi = 0 try: assert_equal(actualr, desiredr) assert_equal(actuali, desiredi) except AssertionError: raise AssertionError(msg) # Inf/nan/negative zero handling try: # isscalar test to check cases such as [np.nan] != np.nan if isscalar(desired) != isscalar(actual): raise AssertionError(msg) # If one of desired/actual is not finite, handle it specially here: # check that both are nan if any is a nan, and test for equality # otherwise if not (gisfinite(desired) and gisfinite(actual)): isdesnan = gisnan(desired) isactnan = gisnan(actual) if isdesnan or isactnan: if not (isdesnan and 
isactnan): raise AssertionError(msg) else: if not desired == actual: raise AssertionError(msg) return elif desired == 0 and actual == 0: if not signbit(desired) == signbit(actual): raise AssertionError(msg) # If TypeError or ValueError raised while using isnan and co, just handle # as before except (TypeError, ValueError, NotImplementedError): pass # Explicitly use __eq__ for comparison, ticket #2552 if not (desired == actual): raise AssertionError(msg) def print_assert_equal(test_string, actual, desired): """ Test if two objects are equal, and print an error message if test fails. The test is performed with ``actual == desired``. Parameters ---------- test_string : str The message supplied to AssertionError. actual : object The object to test for equality against `desired`. desired : object The expected result. Examples -------- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) Traceback (most recent call last): ... AssertionError: Test XYZ of func xyz failed ACTUAL: [0, 1] DESIRED: [0, 2] """ __tracebackhide__ = True # Hide traceback for py.test import pprint if not (actual == desired): msg = StringIO() msg.write(test_string) msg.write(' failed\nACTUAL: \n') pprint.pprint(actual, msg) msg.write('DESIRED: \n') pprint.pprint(desired, msg) raise AssertionError(msg.getvalue()) def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): """ Raises an AssertionError if two items are not equal up to desired precision. .. note:: It is recommended to use one of `assert_allclose`, `assert_array_almost_equal_nulp` or `assert_array_max_ulp` instead of this function for more consistent floating point comparisons. The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``. Given two objects (numbers or ndarrays), check that all elements of these objects are almost equal. An exception is raised at conflicting values. For ndarrays this delegates to assert_array_almost_equal Parameters ---------- actual : array_like The object to check. desired : array_like The expected object. decimal : int, optional Desired precision, default is 7. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal up to specified precision. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Examples -------- >>> import numpy.testing as npt >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) ... <type 'exceptions.AssertionError'>: Items are not equal: ACTUAL: 2.3333333333333002 DESIRED: 2.3333333399999998 >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), ... np.array([1.0,2.33333334]), decimal=9) ... <type 'exceptions.AssertionError'>: Arrays are not almost equal <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 2.33333333]) y: array([ 1. 
, 2.33333334]) """ __tracebackhide__ = True # Hide traceback for py.test from numpy.core import ndarray from numpy.lib import iscomplexobj, real, imag # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly # XXX: catch ValueError for subclasses of ndarray where iscomplex fail try: usecomplex = iscomplexobj(actual) or iscomplexobj(desired) except ValueError: usecomplex = False def _build_err_msg(): header = ('Arrays are not almost equal to %d decimals' % decimal) return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header) if usecomplex: if iscomplexobj(actual): actualr = real(actual) actuali = imag(actual) else: actualr = actual actuali = 0 if iscomplexobj(desired): desiredr = real(desired) desiredi = imag(desired) else: desiredr = desired desiredi = 0 try: assert_almost_equal(actualr, desiredr, decimal=decimal) assert_almost_equal(actuali, desiredi, decimal=decimal) except AssertionError: raise AssertionError(_build_err_msg()) if isinstance(actual, (ndarray, tuple, list)) \ or isinstance(desired, (ndarray, tuple, list)): return assert_array_almost_equal(actual, desired, decimal, err_msg) try: # If one of desired/actual is not finite, handle it specially here: # check that both are nan if any is a nan, and test for equality # otherwise if not (gisfinite(desired) and gisfinite(actual)): if gisnan(desired) or gisnan(actual): if not (gisnan(desired) and gisnan(actual)): raise AssertionError(_build_err_msg()) else: if not desired == actual: raise AssertionError(_build_err_msg()) return except (NotImplementedError, TypeError): pass if round(abs(desired - actual), decimal) != 0: raise AssertionError(_build_err_msg()) def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): """ Raises an AssertionError if two items are not equal up to significant digits. .. note:: It is recommended to use one of `assert_allclose`, `assert_array_almost_equal_nulp` or `assert_array_max_ulp` instead of this function for more consistent floating point comparisons. Given two numbers, check that they are approximately equal. Approximately equal is defined as the number of significant digits that agree. Parameters ---------- actual : scalar The object to check. desired : scalar The expected object. significant : int, optional Desired precision, default is 7. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal up to specified precision. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Examples -------- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, significant=8) >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, significant=8) ... 
<type 'exceptions.AssertionError'>: Items are not equal to 8 significant digits: ACTUAL: 1.234567e-021 DESIRED: 1.2345672000000001e-021 the evaluated condition that raises the exception is >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) True """ __tracebackhide__ = True # Hide traceback for py.test import numpy as np (actual, desired) = map(float, (actual, desired)) if desired == actual: return # Normalized the numbers to be in range (-10.0,10.0) # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) with np.errstate(invalid='ignore'): scale = 0.5*(np.abs(desired) + np.abs(actual)) scale = np.power(10, np.floor(np.log10(scale))) try: sc_desired = desired/scale except ZeroDivisionError: sc_desired = 0.0 try: sc_actual = actual/scale except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg([actual, desired], err_msg, header='Items are not equal to %d significant digits:' % significant, verbose=verbose) try: # If one of desired/actual is not finite, handle it specially here: # check that both are nan if any is a nan, and test for equality # otherwise if not (gisfinite(desired) and gisfinite(actual)): if gisnan(desired) or gisnan(actual): if not (gisnan(desired) and gisnan(actual)): raise AssertionError(msg) else: if not desired == actual: raise AssertionError(msg) return except (TypeError, NotImplementedError): pass if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): raise AssertionError(msg) def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6): __tracebackhide__ = True # Hide traceback for py.test from numpy.core import array, isnan, isinf, any, all, inf x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) def safe_comparison(*args, **kwargs): # There are a number of cases where comparing two arrays hits special # cases in array_richcompare, specifically around strings and void # dtypes. Basically, we just can't do comparisons involving these # types, unless both arrays have exactly the *same* type. So # e.g. you can apply == to two string arrays, or two arrays with # identical structured dtypes. But if you compare a non-string array # to a string array, or two arrays with non-identical structured # dtypes, or anything like that, then internally stuff blows up. # Currently, when things blow up, we just return a scalar False or # True. But we also emit a DeprecationWarning, b/c eventually we # should raise an error here. (Ideally we might even make this work # properly, but since that will require rewriting a bunch of how # ufuncs work then we are not counting on that.) # # The point of this little function is to let the DeprecationWarning # pass (or maybe eventually catch the errors and return False, I # dunno, that's a little trickier and we can figure that out when the # time comes). 
with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) return comparison(*args, **kwargs) def isnumber(x): return x.dtype.char in '?bhilqpBHILQPefdgFDG' def chk_same_position(x_id, y_id, hasval='nan'): """Handling nan/inf: check that x and y have the nan/inf at the same locations.""" try: assert_array_equal(x_id, y_id) except AssertionError: msg = build_err_msg([x, y], err_msg + '\nx and y %s location mismatch:' % (hasval), verbose=verbose, header=header, names=('x', 'y'), precision=precision) raise AssertionError(msg) try: cond = (x.shape == () or y.shape == ()) or x.shape == y.shape if not cond: msg = build_err_msg([x, y], err_msg + '\n(shapes %s, %s mismatch)' % (x.shape, y.shape), verbose=verbose, header=header, names=('x', 'y'), precision=precision) if not cond: raise AssertionError(msg) if isnumber(x) and isnumber(y): x_isnan, y_isnan = isnan(x), isnan(y) x_isinf, y_isinf = isinf(x), isinf(y) # Validate that the special values are in the same place if any(x_isnan) or any(y_isnan): chk_same_position(x_isnan, y_isnan, hasval='nan') if any(x_isinf) or any(y_isinf): # Check +inf and -inf separately, since they are different chk_same_position(x == +inf, y == +inf, hasval='+inf') chk_same_position(x == -inf, y == -inf, hasval='-inf') # Combine all the special values x_id, y_id = x_isnan, y_isnan x_id |= x_isinf y_id |= y_isinf # Only do the comparison if actual values are left if all(x_id): return if any(x_id): val = safe_comparison(x[~x_id], y[~y_id]) else: val = safe_comparison(x, y) else: val = safe_comparison(x, y) if isinstance(val, bool): cond = val reduced = [0] else: reduced = val.ravel() cond = reduced.all() reduced = reduced.tolist() if not cond: match = 100-100.0*reduced.count(1)/len(reduced) msg = build_err_msg([x, y], err_msg + '\n(mismatch %s%%)' % (match,), verbose=verbose, header=header, names=('x', 'y'), precision=precision) if not cond: raise AssertionError(msg) except ValueError: import traceback efmt = traceback.format_exc() header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, names=('x', 'y'), precision=precision) raise ValueError(msg) def assert_array_equal(x, y, err_msg='', verbose=True): """ Raises an AssertionError if two array_like objects are not equal. Given two array_like objects, check that the shape is equal and all elements of these objects are equal. An exception is raised at shape mismatch or conflicting values. In contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. The usual caution for verifying equality with floating point numbers is advised. Parameters ---------- x : array_like The actual object to check. y : array_like The desired, expected object. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired objects are not equal. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Examples -------- The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], ... [np.exp(0),2.33333, np.nan]) Assert fails with numerical inprecision with floats: >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], ... 
[1, np.sqrt(np.pi)**2, np.nan]) ... <type 'exceptions.ValueError'>: AssertionError: Arrays are not equal <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 3.14159265, NaN]) y: array([ 1. , 3.14159265, NaN]) Use `assert_allclose` or one of the nulp (number of floating point values) functions for these cases instead: >>> np.testing.assert_allclose([1.0,np.pi,np.nan], ... [1, np.sqrt(np.pi)**2, np.nan], ... rtol=1e-10, atol=0) """ assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal') def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): """ Raises an AssertionError if two objects are not equal up to desired precision. .. note:: It is recommended to use one of `assert_allclose`, `assert_array_almost_equal_nulp` or `assert_array_max_ulp` instead of this function for more consistent floating point comparisons. The test verifies identical shapes and verifies values with ``abs(desired-actual) < 0.5 * 10**(-decimal)``. Given two array_like objects, check that the shape is equal and all elements of these objects are almost equal. An exception is raised at shape mismatch or conflicting values. In contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. Parameters ---------- x : array_like The actual object to check. y : array_like The desired, expected object. decimal : int, optional Desired precision, default is 6. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal up to specified precision. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Examples -------- the first assert does not raise an exception >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], [1.0,2.333,np.nan]) >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], ... [1.0,2.33339,np.nan], decimal=5) ... <type 'exceptions.AssertionError'>: AssertionError: Arrays are not almost equal <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 2.33333, NaN]) y: array([ 1. , 2.33339, NaN]) >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], ... [1.0,2.33333, 5], decimal=5) <type 'exceptions.ValueError'>: ValueError: Arrays are not almost equal x: array([ 1. , 2.33333, NaN]) y: array([ 1. , 2.33333, 5. ]) """ __tracebackhide__ = True # Hide traceback for py.test from numpy.core import around, number, float_, result_type, array from numpy.core.numerictypes import issubdtype from numpy.core.fromnumeric import any as npany def compare(x, y): try: if npany(gisinf(x)) or npany( gisinf(y)): xinfid = gisinf(x) yinfid = gisinf(y) if not xinfid == yinfid: return False # if one item, x and y is +- inf if x.size == y.size == 1: return x == y x = x[~xinfid] y = y[~yinfid] except (TypeError, NotImplementedError): pass # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) 
y = array(y, dtype=dtype, copy=False, subok=True) z = abs(x-y) if not issubdtype(z.dtype, number): z = z.astype(float_) # handle object arrays return around(z, decimal) <= 10.0**(-decimal) assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, header=('Arrays are not almost equal to %d decimals' % decimal), precision=decimal) def assert_array_less(x, y, err_msg='', verbose=True): """ Raises an AssertionError if two array_like objects are not ordered by less than. Given two array_like objects, check that the shape is equal and all elements of the first object are strictly smaller than those of the second object. An exception is raised at shape mismatch or incorrectly ordered values. Shape mismatch does not raise if an object has zero dimension. In contrast to the standard usage in numpy, NaNs are compared, no assertion is raised if both objects have NaNs in the same positions. Parameters ---------- x : array_like The smaller object to check. y : array_like The larger object to compare. err_msg : string The error message to be printed in case of failure. verbose : bool If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired objects are not equal. See Also -------- assert_array_equal: tests objects for equality assert_array_almost_equal: test objects for equality up to precision Examples -------- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) ... <type 'exceptions.ValueError'>: Arrays are not less-ordered (mismatch 50.0%) x: array([ 1., 1., NaN]) y: array([ 1., 2., NaN]) >>> np.testing.assert_array_less([1.0, 4.0], 3) ... <type 'exceptions.ValueError'>: Arrays are not less-ordered (mismatch 50.0%) x: array([ 1., 4.]) y: array(3) >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) ... <type 'exceptions.ValueError'>: Arrays are not less-ordered (shapes (3,), (1,) mismatch) x: array([ 1., 2., 3.]) y: array([4]) """ __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') def runstring(astr, dict): exec(astr, dict) def assert_string_equal(actual, desired): """ Test if two strings are equal. If the given strings are equal, `assert_string_equal` does nothing. If they are not equal, an AssertionError is raised, and the diff between the strings is shown. Parameters ---------- actual : str The string to test for equality against the expected string. desired : str The expected string. Examples -------- >>> np.testing.assert_string_equal('abc', 'abc') >>> np.testing.assert_string_equal('abc', 'abcd') Traceback (most recent call last): File "<stdin>", line 1, in <module> ... AssertionError: Differences in strings: - abc+ abcd? + """ # delay import of difflib to reduce startup time __tracebackhide__ = True # Hide traceback for py.test import difflib if not isinstance(actual, str): raise AssertionError(repr(type(actual))) if not isinstance(desired, str): raise AssertionError(repr(type(desired))) if re.match(r'\A'+desired+r'\Z', actual, re.M): return diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) diff_list = [] while diff: d1 = diff.pop(0) if d1.startswith(' '): continue if d1.startswith('- '): l = [d1] d2 = diff.pop(0) if d2.startswith('? 
'): l.append(d2) d2 = diff.pop(0) if not d2.startswith('+ '): raise AssertionError(repr(d2)) l.append(d2) if diff: d3 = diff.pop(0) if d3.startswith('? '): l.append(d3) else: diff.insert(0, d3) if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): continue diff_list.extend(l) continue raise AssertionError(repr(d1)) if not diff_list: return msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() if actual != desired: raise AssertionError(msg) def rundocs(filename=None, raise_on_error=True): """ Run doctests found in the given file. By default `rundocs` raises an AssertionError on failure. Parameters ---------- filename : str The path to the file for which the doctests are run. raise_on_error : bool Whether to raise an AssertionError when a doctest fails. Default is True. Notes ----- The doctests can be run by the user/developer by adding the ``doctests`` argument to the ``test()`` call. For example, to run all tests (including doctests) for `numpy.lib`: >>> np.lib.test(doctests=True) #doctest: +SKIP """ import doctest import imp if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] path = [os.path.dirname(filename)] file, pathname, description = imp.find_module(name, path) try: m = imp.load_module(name, file, pathname, description) finally: file.close() tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) msg = [] if raise_on_error: out = lambda s: msg.append(s) else: out = None for test in tests: runner.run(test, out=out) if runner.failures > 0 and raise_on_error: raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) def raises(*args,**kwargs): nose = import_nose() return nose.tools.raises(*args,**kwargs) def assert_raises(*args,**kwargs): """ assert_raises(exception_class, callable, *args, **kwargs) Fail unless an exception of class exception_class is thrown by callable when invoked with arguments args and keyword arguments kwargs. If a different type of exception is thrown, it will not be caught, and the test case will be deemed to have suffered an error, exactly as for an unexpected exception. Alternatively, `assert_raises` can be used as a context manager: >>> from numpy.testing import assert_raises >>> with assert_raises(ZeroDivisionError): ... 1 / 0 is equivalent to >>> def div(x, y): ... return x / y >>> assert_raises(ZeroDivisionError, div, 1, 0) """ __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() return nose.tools.assert_raises(*args,**kwargs) assert_raises_regex_impl = None def assert_raises_regex(exception_class, expected_regexp, callable_obj=None, *args, **kwargs): """ Fail unless an exception of class exception_class and with message that matches expected_regexp is thrown by callable when invoked with arguments args and keyword arguments kwargs. Name of this function adheres to Python 3.2+ reference, but should work in all versions down to 2.6. 
""" __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() global assert_raises_regex_impl if assert_raises_regex_impl is None: try: # Python 3.2+ assert_raises_regex_impl = nose.tools.assert_raises_regex except AttributeError: try: # 2.7+ assert_raises_regex_impl = nose.tools.assert_raises_regexp except AttributeError: # 2.6 # This class is copied from Python2.7 stdlib almost verbatim class _AssertRaisesContext(object): """A context manager used to implement TestCase.assertRaises* methods.""" def __init__(self, expected, expected_regexp=None): self.expected = expected self.expected_regexp = expected_regexp def failureException(self, msg): return AssertionError(msg) def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): if exc_type is None: try: exc_name = self.expected.__name__ except AttributeError: exc_name = str(self.expected) raise self.failureException( "{0} not raised".format(exc_name)) if not issubclass(exc_type, self.expected): # let unexpected exceptions pass through return False self.exception = exc_value # store for later retrieval if self.expected_regexp is None: return True expected_regexp = self.expected_regexp if isinstance(expected_regexp, basestring): expected_regexp = re.compile(expected_regexp) if not expected_regexp.search(str(exc_value)): raise self.failureException( '"%s" does not match "%s"' % (expected_regexp.pattern, str(exc_value))) return True def impl(cls, regex, callable_obj, *a, **kw): mgr = _AssertRaisesContext(cls, regex) if callable_obj is None: return mgr with mgr: callable_obj(*a, **kw) assert_raises_regex_impl = impl return assert_raises_regex_impl(exception_class, expected_regexp, callable_obj, *args, **kwargs) def decorate_methods(cls, decorator, testmatch=None): """ Apply a decorator to all methods in a class matching a regular expression. The given decorator is applied to all public methods of `cls` that are matched by the regular expression `testmatch` (``testmatch.search(methodname)``). Methods that are private, i.e. start with an underscore, are ignored. Parameters ---------- cls : class Class whose methods to decorate. decorator : function Decorator to apply to methods testmatch : compiled regexp or str, optional The regular expression. Default value is None, in which case the nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) is used. If `testmatch` is a string, it is compiled to a regular expression first. """ if testmatch is None: testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) else: testmatch = re.compile(testmatch) cls_attr = cls.__dict__ # delayed import to reduce startup time from inspect import isfunction methods = [_m for _m in cls_attr.values() if isfunction(_m)] for function in methods: try: if hasattr(function, 'compat_func_name'): funcname = function.compat_func_name else: funcname = function.__name__ except AttributeError: # not a function continue if testmatch.search(funcname) and not funcname.startswith('_'): setattr(cls, funcname, decorator(function)) return def measure(code_str,times=1,label=None): """ Return elapsed time for executing code in the namespace of the caller. The supplied code string is compiled with the Python builtin ``compile``. The precision of the timing is 10 milli-seconds. If the code will execute fast on this timescale, it can be executed many times to get reasonable timing accuracy. Parameters ---------- code_str : str The code to be timed. times : int, optional The number of times the code is executed. Default is 1. 
The code is only compiled once. label : str, optional A label to identify `code_str` with. This is passed into ``compile`` as the second argument (for run-time error messages). Returns ------- elapsed : float Total elapsed time in seconds for executing `code_str` `times` times. Examples -------- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', ... times=times) >>> print("Time for a single execution : ", etime / times, "s") Time for a single execution : 0.005 s """ frame = sys._getframe(1) locs, globs = frame.f_locals, frame.f_globals code = compile(code_str, 'Test name: %s ' % label, 'exec') i = 0 elapsed = jiffies() while i < times: i += 1 exec(code, globs, locs) elapsed = jiffies() - elapsed return 0.01*elapsed def _assert_valid_refcount(op): """ Check that ufuncs don't mishandle refcount of object `1`. Used in a few regression tests. """ import numpy as np b = np.arange(100*100).reshape(100, 100) c = b i = 1 rc = sys.getrefcount(i) for j in range(15): d = op(b, c) assert_(sys.getrefcount(i) >= rc) del d # for pyflakes def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False, err_msg='', verbose=True): """ Raises an AssertionError if two objects are not equal up to desired tolerance. The test is equivalent to ``allclose(actual, desired, rtol, atol)``. It compares the difference between `actual` and `desired` to ``atol + rtol * abs(desired)``. .. versionadded:: 1.5.0 Parameters ---------- actual : array_like Array obtained. desired : array_like Array desired. rtol : float, optional Relative tolerance. atol : float, optional Absolute tolerance. equal_nan : bool, optional. If True, NaNs will compare equal. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. Raises ------ AssertionError If actual and desired are not equal up to specified precision. See Also -------- assert_array_almost_equal_nulp, assert_array_max_ulp Examples -------- >>> x = [1e-5, 1e-3, 1e-1] >>> y = np.arccos(np.cos(x)) >>> assert_allclose(x, y, rtol=1e-5, atol=0) """ __tracebackhide__ = True # Hide traceback for py.test import numpy as np def compare(x, y): return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan) actual, desired = np.asanyarray(actual), np.asanyarray(desired) header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) assert_array_compare(compare, actual, desired, err_msg=str(err_msg), verbose=verbose, header=header) def assert_array_almost_equal_nulp(x, y, nulp=1): """ Compare two arrays relatively to their spacing. This is a relatively robust method to compare two arrays whose amplitude is variable. Parameters ---------- x, y : array_like Input arrays. nulp : int, optional The maximum number of unit in the last place for tolerance (see Notes). Default is 1. Returns ------- None Raises ------ AssertionError If the spacing between `x` and `y` for one or more elements is larger than `nulp`. See Also -------- assert_array_max_ulp : Check that all items of arrays differ in at most N Units in the Last Place. spacing : Return the distance between x and the nearest adjacent number. 
Notes ----- An assertion is raised if the following condition is not met:: abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) Examples -------- >>> x = np.array([1., 1e-10, 1e-20]) >>> eps = np.finfo(x.dtype).eps >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) Traceback (most recent call last): ... AssertionError: X and Y are not equal to 1 ULP (max is 2) """ __tracebackhide__ = True # Hide traceback for py.test import numpy as np ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) if not np.all(np.abs(x-y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = "X and Y are not equal to %d ULP" % nulp else: max_nulp = np.max(nulp_diff(x, y)) msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) raise AssertionError(msg) def assert_array_max_ulp(a, b, maxulp=1, dtype=None): """ Check that all items of arrays differ in at most N Units in the Last Place. Parameters ---------- a, b : array_like Input arrays to be compared. maxulp : int, optional The maximum number of units in the last place that elements of `a` and `b` can differ. Default is 1. dtype : dtype, optional Data-type to convert `a` and `b` to if given. Default is None. Returns ------- ret : ndarray Array containing number of representable floating point numbers between items in `a` and `b`. Raises ------ AssertionError If one or more elements differ by more than `maxulp`. See Also -------- assert_array_almost_equal_nulp : Compare two arrays relatively to their spacing. Examples -------- >>> a = np.linspace(0., 1., 100) >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) """ __tracebackhide__ = True # Hide traceback for py.test import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): raise AssertionError("Arrays are not almost equal up to %g ULP" % maxulp) return ret def nulp_diff(x, y, dtype=None): """For each item in x and y, return the number of representable floating points between them. Parameters ---------- x : array_like first input array y : array_like second input array dtype : dtype, optional Data-type to convert `x` and `y` to if given. Default is None. Returns ------- nulp : array_like number of representable floating point numbers between each item in x and y. 
Examples -------- # By definition, epsilon is the smallest number such as 1 + eps != 1, so # there should be exactly one ULP between 1 and 1 + eps >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) 1.0 """ import numpy as np if dtype: x = np.array(x, dtype=dtype) y = np.array(y, dtype=dtype) else: x = np.array(x) y = np.array(y) t = np.common_type(x, y) if np.iscomplexobj(x) or np.iscomplexobj(y): raise NotImplementedError("_nulp not implemented for complex array") x = np.array(x, dtype=t) y = np.array(y, dtype=t) if not x.shape == y.shape: raise ValueError("x and y do not have the same shape: %s - %s" % (x.shape, y.shape)) def _diff(rx, ry, vdt): diff = np.array(rx-ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) ry = integer_repr(y) return _diff(rx, ry, t) def _integer_repr(x, vdt, comp): # Reinterpret binary representation of the float as sign-magnitude: # take into account two-complement representation # See also # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm rx = x.view(vdt) if not (rx.size == 1): rx[rx < 0] = comp - rx[rx < 0] else: if rx < 0: rx = comp - rx return rx def integer_repr(x): """Return the signed-magnitude interpretation of the binary representation of x.""" import numpy as np if x.dtype == np.float32: return _integer_repr(x, np.int32, np.int32(-2**31)) elif x.dtype == np.float64: return _integer_repr(x, np.int64, np.int64(-2**63)) else: raise ValueError("Unsupported dtype %s" % x.dtype) # The following two classes are copied from python 2.6 warnings module (context # manager) class WarningMessage(object): """ Holds the result of a single showwarning() call. Deprecated in 1.8.0 Notes ----- `WarningMessage` is copied from the Python 2.6 warnings module, so it can be used in NumPy with older Python versions. """ _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", "line") def __init__(self, message, category, filename, lineno, file=None, line=None): local_values = locals() for attr in self._WARNING_DETAILS: setattr(self, attr, local_values[attr]) if category: self._category_name = category.__name__ else: self._category_name = None def __str__(self): return ("{message : %r, category : %r, filename : %r, lineno : %s, " "line : %r}" % (self.message, self._category_name, self.filename, self.lineno, self.line)) class WarningManager(object): """ A context manager that copies and restores the warnings filter upon exiting the context. The 'record' argument specifies whether warnings should be captured by a custom implementation of ``warnings.showwarning()`` and be appended to a list returned by the context manager. Otherwise None is returned by the context manager. The objects appended to the list are arguments whose attributes mirror the arguments to ``showwarning()``. The 'module' argument is to specify an alternative module to the module named 'warnings' and imported under that name. This argument is only useful when testing the warnings module itself. Deprecated in 1.8.0 Notes ----- `WarningManager` is a copy of the ``catch_warnings`` context manager from the Python 2.6 warnings module, with slight modifications. It is copied so it can be used in NumPy with older Python versions. 
""" def __init__(self, record=False, module=None): self._record = record if module is None: self._module = sys.modules['warnings'] else: self._module = module self._entered = False def __enter__(self): if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning if self._record: log = [] def showwarning(*args, **kwargs): log.append(WarningMessage(*args, **kwargs)) self._module.showwarning = showwarning return log else: return None def __exit__(self): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') yield if not len(l) > 0: name_str = " when calling %s" % name if name is not None else "" raise AssertionError("No warning raised" + name_str) if not l[0].category is warning_class: name_str = "%s " % name if name is not None else "" raise AssertionError("First warning %sis not a %s (is %s)" % (name_str, warning_class, l[0])) def assert_warns(warning_class, *args, **kwargs): """ Fail unless the given callable throws the specified warning. A warning of class warning_class should be thrown by the callable when invoked with arguments args and keyword arguments kwargs. If a different type of warning is thrown, it will not be caught, and the test case will be deemed to have suffered an error. If called with all arguments other than the warning class omitted, may be used as a context manager: with assert_warns(SomeWarning): do_something() The ability to be used as a context manager is new in NumPy v1.11.0. .. versionadded:: 1.4.0 Parameters ---------- warning_class : class The class defining the warning that `func` is expected to throw. func : callable The callable to test. \\*args : Arguments Arguments passed to `func`. \\*\\*kwargs : Kwargs Keyword arguments passed to `func`. Returns ------- The value returned by `func`. """ if not args: return _assert_warns_context(warning_class) func = args[0] args = args[1:] with _assert_warns_context(warning_class, name=func.__name__): return func(*args, **kwargs) @contextlib.contextmanager def _assert_no_warnings_context(name=None): __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') yield if len(l) > 0: name_str = " when calling %s" % name if name is not None else "" raise AssertionError("Got warnings%s: %s" % (name_str, l)) def assert_no_warnings(*args, **kwargs): """ Fail if the given callable produces any warnings. If called with all arguments omitted, may be used as a context manager: with assert_no_warnings(): do_something() The ability to be used as a context manager is new in NumPy v1.11.0. .. versionadded:: 1.7.0 Parameters ---------- func : callable The callable to test. \\*args : Arguments Arguments passed to `func`. \\*\\*kwargs : Kwargs Keyword arguments passed to `func`. Returns ------- The value returned by `func`. 
""" if not args: return _assert_no_warnings_context() func = args[0] args = args[1:] with _assert_no_warnings_context(name=func.__name__): return func(*args, **kwargs) def _gen_alignment_data(dtype=float32, type='binary', max_size=24): """ generator producing data with different alignment and offsets to test simd vectorization Parameters ---------- dtype : dtype data type to produce type : string 'unary': create data for unary operations, creates one input and output array 'binary': create data for unary operations, creates two input and output array max_size : integer maximum size of data to produce Returns ------- if type is 'unary' yields one output, one input array and a message containing information on the data if type is 'binary' yields one output array, two input array and a message containing information on the data """ ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' for o in range(3): for s in range(o + 2, max(o + 3, max_size)): if type == 'unary': inp = lambda: arange(s, dtype=dtype)[o:] out = empty((s,), dtype=dtype)[o:] yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place') yield out[1:], inp()[:-1], ufmt % \ (o + 1, o, s - 1, dtype, 'out of place') yield out[:-1], inp()[1:], ufmt % \ (o, o + 1, s - 1, dtype, 'out of place') yield inp()[:-1], inp()[1:], ufmt % \ (o, o + 1, s - 1, dtype, 'aliased') yield inp()[1:], inp()[:-1], ufmt % \ (o + 1, o, s - 1, dtype, 'aliased') if type == 'binary': inp1 = lambda: arange(s, dtype=dtype)[o:] inp2 = lambda: arange(s, dtype=dtype)[o:] out = empty((s,), dtype=dtype)[o:] yield out, inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'out of place') yield inp1(), inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'in place1') yield inp2(), inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'in place2') yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ (o + 1, o, o, s - 1, dtype, 'out of place') yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ (o, o + 1, o, s - 1, dtype, 'out of place') yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ (o, o, o + 1, s - 1, dtype, 'out of place') yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ (o + 1, o, o, s - 1, dtype, 'aliased') yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ (o, o + 1, o, s - 1, dtype, 'aliased') yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ (o, o, o + 1, s - 1, dtype, 'aliased') class IgnoreException(Exception): "Ignoring this exception due to disabled feature" @contextlib.contextmanager def tempdir(*args, **kwargs): """Context manager to provide a temporary test folder. All arguments are passed as this to the underlying tempfile.mkdtemp function. """ tmpdir = mkdtemp(*args, **kwargs) try: yield tmpdir finally: shutil.rmtree(tmpdir) @contextlib.contextmanager def temppath(*args, **kwargs): """Context manager for temporary files. Context manager that returns the path to a closed temporary file. Its parameters are the same as for tempfile.mkstemp and are passed directly to that function. The underlying file is removed when the context is exited, so it should be closed at that time. Windows does not allow a temporary file to be opened if it is already open, so the underlying file must be closed after opening before it can be opened again. 
""" fd, path = mkstemp(*args, **kwargs) os.close(fd) try: yield path finally: os.remove(path) class clear_and_catch_warnings(warnings.catch_warnings): """ Context manager that resets warning registry for catching warnings Warnings can be slippery, because, whenever a warning is triggered, Python adds a ``__warningregistry__`` member to the *calling* module. This makes it impossible to retrigger the warning in this module, whatever you put in the warnings filters. This context manager accepts a sequence of `modules` as a keyword argument to its constructor and: * stores and removes any ``__warningregistry__`` entries in given `modules` on entry; * resets ``__warningregistry__`` to its previous state on exit. This makes it possible to trigger any warning afresh inside the context manager without disturbing the state of warnings outside. For compatibility with Python 3.0, please consider all arguments to be keyword-only. Parameters ---------- record : bool, optional Specifies whether warnings should be captured by a custom implementation of ``warnings.showwarning()`` and be appended to a list returned by the context manager. Otherwise None is returned by the context manager. The objects appended to the list are arguments whose attributes mirror the arguments to ``showwarning()``. modules : sequence, optional Sequence of modules for which to reset warnings registry on entry and restore on exit Examples -------- >>> import warnings >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): ... warnings.simplefilter('always') ... # do something that raises a warning in np.core.fromnumeric """ class_modules = () def __init__(self, record=False, modules=()): self.modules = set(modules).union(self.class_modules) self._warnreg_copies = {} super(clear_and_catch_warnings, self).__init__(record=record) def __enter__(self): for mod in self.modules: if hasattr(mod, '__warningregistry__'): mod_reg = mod.__warningregistry__ self._warnreg_copies[mod] = mod_reg.copy() mod_reg.clear() return super(clear_and_catch_warnings, self).__enter__() def __exit__(self, *exc_info): super(clear_and_catch_warnings, self).__exit__(*exc_info) for mod in self.modules: if hasattr(mod, '__warningregistry__'): mod.__warningregistry__.clear() if mod in self._warnreg_copies: mod.__warningregistry__.update(self._warnreg_copies[mod])
MyRookie/SentimentAnalyse
venv/lib/python2.7/site-packages/numpy/testing/utils.py
Python
mit
66,431
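The assertion helpers in the utils.py file above are normally consumed through numpy.testing. A minimal usage sketch (assuming a standard NumPy install where these names are re-exported) showing assert_allclose and the context-manager form of assert_warns described in its docstring:

import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_warns

# assert_allclose passes when |actual - desired| <= atol + rtol * |desired|
x = [1e-5, 1e-3, 1e-1]
assert_allclose(np.arccos(np.cos(x)), x, rtol=1e-5, atol=0)

# assert_warns as a context manager (supported since NumPy 1.11)
with assert_warns(UserWarning):
    warnings.warn("something changed", UserWarning)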
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings


def duplicate_txn_id(ipn_obj):
    """Returns True if a record with this transaction id exists and it is not
    a payment which has gone from pending to completed.
    """
    query = ipn_obj._default_manager.filter(txn_id = ipn_obj.txn_id)
    if ipn_obj.payment_status == "Completed":
        # A payment that was pending and is now completed will have the same
        # IPN transaction id, so don't flag them as duplicates because it
        # means that the payment was finally successful!
        query = query.exclude(payment_status = "Pending")
    return query.count() > 0


def make_secret(form_instance, secret_fields=None):
    """
    Returns a secret for use in an EWP form or an IPN verification based on a
    selection of variables in params. Should only be used with SSL.
    """
    # @@@ Moved here as temporary fix to avoid dependency on auth.models.
    from django.contrib.auth.models import get_hexdigest
    # @@@ amount is mc_gross on the IPN - where should mapping logic go?
    # @@@ amount / mc_gross is not necessarily returned as it was sent - how to use it? 10.00 vs. 10.0
    # @@@ the secret should be based on the invoice or custom fields as well - otherwise it's always the same.

    # Build the secret with fields available in both PaymentForm and the IPN. Order matters.
    if secret_fields is None:
        secret_fields = ['business', 'item_name']

    data = ""
    for name in secret_fields:
        if hasattr(form_instance, 'cleaned_data'):
            if name in form_instance.cleaned_data:
                data += unicode(form_instance.cleaned_data[name])
        else:
            # Initial data passed into the constructor overrides defaults.
            if name in form_instance.initial:
                data += unicode(form_instance.initial[name])
            elif name in form_instance.fields and form_instance.fields[name].initial is not None:
                data += unicode(form_instance.fields[name].initial)

    secret = get_hexdigest('sha1', settings.SECRET_KEY, data)
    return secret


def check_secret(form_instance, secret):
    """
    Returns True if the received `secret` matches the expected secret for
    form_instance. Used to verify IPN.
    """
    # @@@ add invoice & custom
    # secret_fields = ['business', 'item_name']
    return make_secret(form_instance) == secret
zsuzhengdu/camp
paypal/standard/helpers.py
Python
mit
2,448
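A hypothetical sketch of how make_secret() and check_secret() above fit together in an encrypted-website-payments flow; the payment_form object, the 'custom' field used to carry the secret, and the surrounding wiring are placeholders for illustration, not part of the module.

from paypal.standard.helpers import make_secret, check_secret

def sign_payment_form(payment_form):
    # Embed the derived secret in the outgoing form so PayPal echoes it
    # back with the IPN (hypothetical wiring, illustration only).
    payment_form.initial['custom'] = make_secret(payment_form)
    return payment_form

def ipn_is_trusted(payment_form, received_secret):
    # On IPN receipt, recompute the secret from the same fields and compare.
    return check_secret(payment_form, received_secret)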
'''tzinfo timezone information for US/Eastern.''' from pytz.tzinfo import DstTzInfo from pytz.tzinfo import memorized_datetime as d from pytz.tzinfo import memorized_ttinfo as i class Eastern(DstTzInfo): '''US/Eastern timezone definition. See datetime.tzinfo for details''' zone = 'US/Eastern' _utc_transition_times = [ d(1,1,1,0,0,0), d(1918,3,31,7,0,0), d(1918,10,27,6,0,0), d(1919,3,30,7,0,0), d(1919,10,26,6,0,0), d(1920,3,28,7,0,0), d(1920,10,31,6,0,0), d(1921,4,24,7,0,0), d(1921,9,25,6,0,0), d(1922,4,30,7,0,0), d(1922,9,24,6,0,0), d(1923,4,29,7,0,0), d(1923,9,30,6,0,0), d(1924,4,27,7,0,0), d(1924,9,28,6,0,0), d(1925,4,26,7,0,0), d(1925,9,27,6,0,0), d(1926,4,25,7,0,0), d(1926,9,26,6,0,0), d(1927,4,24,7,0,0), d(1927,9,25,6,0,0), d(1928,4,29,7,0,0), d(1928,9,30,6,0,0), d(1929,4,28,7,0,0), d(1929,9,29,6,0,0), d(1930,4,27,7,0,0), d(1930,9,28,6,0,0), d(1931,4,26,7,0,0), d(1931,9,27,6,0,0), d(1932,4,24,7,0,0), d(1932,9,25,6,0,0), d(1933,4,30,7,0,0), d(1933,9,24,6,0,0), d(1934,4,29,7,0,0), d(1934,9,30,6,0,0), d(1935,4,28,7,0,0), d(1935,9,29,6,0,0), d(1936,4,26,7,0,0), d(1936,9,27,6,0,0), d(1937,4,25,7,0,0), d(1937,9,26,6,0,0), d(1938,4,24,7,0,0), d(1938,9,25,6,0,0), d(1939,4,30,7,0,0), d(1939,9,24,6,0,0), d(1940,4,28,7,0,0), d(1940,9,29,6,0,0), d(1941,4,27,7,0,0), d(1941,9,28,6,0,0), d(1942,2,9,7,0,0), d(1945,8,14,23,0,0), d(1945,9,30,6,0,0), d(1946,4,28,7,0,0), d(1946,9,29,6,0,0), d(1947,4,27,7,0,0), d(1947,9,28,6,0,0), d(1948,4,25,7,0,0), d(1948,9,26,6,0,0), d(1949,4,24,7,0,0), d(1949,9,25,6,0,0), d(1950,4,30,7,0,0), d(1950,9,24,6,0,0), d(1951,4,29,7,0,0), d(1951,9,30,6,0,0), d(1952,4,27,7,0,0), d(1952,9,28,6,0,0), d(1953,4,26,7,0,0), d(1953,9,27,6,0,0), d(1954,4,25,7,0,0), d(1954,9,26,6,0,0), d(1955,4,24,7,0,0), d(1955,10,30,6,0,0), d(1956,4,29,7,0,0), d(1956,10,28,6,0,0), d(1957,4,28,7,0,0), d(1957,10,27,6,0,0), d(1958,4,27,7,0,0), d(1958,10,26,6,0,0), d(1959,4,26,7,0,0), d(1959,10,25,6,0,0), d(1960,4,24,7,0,0), d(1960,10,30,6,0,0), d(1961,4,30,7,0,0), d(1961,10,29,6,0,0), d(1962,4,29,7,0,0), d(1962,10,28,6,0,0), d(1963,4,28,7,0,0), d(1963,10,27,6,0,0), d(1964,4,26,7,0,0), d(1964,10,25,6,0,0), d(1965,4,25,7,0,0), d(1965,10,31,6,0,0), d(1966,4,24,7,0,0), d(1966,10,30,6,0,0), d(1967,4,30,7,0,0), d(1967,10,29,6,0,0), d(1968,4,28,7,0,0), d(1968,10,27,6,0,0), d(1969,4,27,7,0,0), d(1969,10,26,6,0,0), d(1970,4,26,7,0,0), d(1970,10,25,6,0,0), d(1971,4,25,7,0,0), d(1971,10,31,6,0,0), d(1972,4,30,7,0,0), d(1972,10,29,6,0,0), d(1973,4,29,7,0,0), d(1973,10,28,6,0,0), d(1974,1,6,7,0,0), d(1974,10,27,6,0,0), d(1975,2,23,7,0,0), d(1975,10,26,6,0,0), d(1976,4,25,7,0,0), d(1976,10,31,6,0,0), d(1977,4,24,7,0,0), d(1977,10,30,6,0,0), d(1978,4,30,7,0,0), d(1978,10,29,6,0,0), d(1979,4,29,7,0,0), d(1979,10,28,6,0,0), d(1980,4,27,7,0,0), d(1980,10,26,6,0,0), d(1981,4,26,7,0,0), d(1981,10,25,6,0,0), d(1982,4,25,7,0,0), d(1982,10,31,6,0,0), d(1983,4,24,7,0,0), d(1983,10,30,6,0,0), d(1984,4,29,7,0,0), d(1984,10,28,6,0,0), d(1985,4,28,7,0,0), d(1985,10,27,6,0,0), d(1986,4,27,7,0,0), d(1986,10,26,6,0,0), d(1987,4,5,7,0,0), d(1987,10,25,6,0,0), d(1988,4,3,7,0,0), d(1988,10,30,6,0,0), d(1989,4,2,7,0,0), d(1989,10,29,6,0,0), d(1990,4,1,7,0,0), d(1990,10,28,6,0,0), d(1991,4,7,7,0,0), d(1991,10,27,6,0,0), d(1992,4,5,7,0,0), d(1992,10,25,6,0,0), d(1993,4,4,7,0,0), d(1993,10,31,6,0,0), d(1994,4,3,7,0,0), d(1994,10,30,6,0,0), d(1995,4,2,7,0,0), d(1995,10,29,6,0,0), d(1996,4,7,7,0,0), d(1996,10,27,6,0,0), d(1997,4,6,7,0,0), d(1997,10,26,6,0,0), d(1998,4,5,7,0,0), d(1998,10,25,6,0,0), d(1999,4,4,7,0,0), d(1999,10,31,6,0,0), 
d(2000,4,2,7,0,0), d(2000,10,29,6,0,0), d(2001,4,1,7,0,0), d(2001,10,28,6,0,0), d(2002,4,7,7,0,0), d(2002,10,27,6,0,0), d(2003,4,6,7,0,0), d(2003,10,26,6,0,0), d(2004,4,4,7,0,0), d(2004,10,31,6,0,0), d(2005,4,3,7,0,0), d(2005,10,30,6,0,0), d(2006,4,2,7,0,0), d(2006,10,29,6,0,0), d(2007,3,11,7,0,0), d(2007,11,4,6,0,0), d(2008,3,9,7,0,0), d(2008,11,2,6,0,0), d(2009,3,8,7,0,0), d(2009,11,1,6,0,0), d(2010,3,14,7,0,0), d(2010,11,7,6,0,0), d(2011,3,13,7,0,0), d(2011,11,6,6,0,0), d(2012,3,11,7,0,0), d(2012,11,4,6,0,0), d(2013,3,10,7,0,0), d(2013,11,3,6,0,0), d(2014,3,9,7,0,0), d(2014,11,2,6,0,0), d(2015,3,8,7,0,0), d(2015,11,1,6,0,0), d(2016,3,13,7,0,0), d(2016,11,6,6,0,0), d(2017,3,12,7,0,0), d(2017,11,5,6,0,0), d(2018,3,11,7,0,0), d(2018,11,4,6,0,0), d(2019,3,10,7,0,0), d(2019,11,3,6,0,0), d(2020,3,8,7,0,0), d(2020,11,1,6,0,0), d(2021,3,14,7,0,0), d(2021,11,7,6,0,0), d(2022,3,13,7,0,0), d(2022,11,6,6,0,0), d(2023,3,12,7,0,0), d(2023,11,5,6,0,0), d(2024,3,10,7,0,0), d(2024,11,3,6,0,0), d(2025,3,9,7,0,0), d(2025,11,2,6,0,0), d(2026,3,8,7,0,0), d(2026,11,1,6,0,0), d(2027,3,14,7,0,0), d(2027,11,7,6,0,0), d(2028,3,12,7,0,0), d(2028,11,5,6,0,0), d(2029,3,11,7,0,0), d(2029,11,4,6,0,0), d(2030,3,10,7,0,0), d(2030,11,3,6,0,0), d(2031,3,9,7,0,0), d(2031,11,2,6,0,0), d(2032,3,14,7,0,0), d(2032,11,7,6,0,0), d(2033,3,13,7,0,0), d(2033,11,6,6,0,0), d(2034,3,12,7,0,0), d(2034,11,5,6,0,0), d(2035,3,11,7,0,0), d(2035,11,4,6,0,0), d(2036,3,9,7,0,0), d(2036,11,2,6,0,0), d(2037,3,8,7,0,0), d(2037,11,1,6,0,0), ] _transition_info = [ i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EWT'), i(-14400,3600,'EPT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), 
i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), i(-14400,3600,'EDT'), i(-18000,0,'EST'), ] Eastern = Eastern()
newvem/pytz
pytz/zoneinfo/US/Eastern.py
Python
mit
9,981
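The transition tables above are not meant to be read directly; they drive the normal pytz workflow. A small illustration (assuming pytz is installed) of how the US/Eastern definition resolves standard versus daylight time:

from datetime import datetime
import pytz

eastern = pytz.timezone('US/Eastern')

winter = eastern.localize(datetime(2015, 1, 15, 12, 0))  # resolved via the EST rows
summer = eastern.localize(datetime(2015, 7, 15, 12, 0))  # resolved via the EDT rows

print(winter.tzname(), winter.utcoffset())  # EST, UTC-5
print(summer.tzname(), summer.utcoffset())  # EDT, UTC-4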
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A module that adds the ability to do ajax requests."""

__author__ = 'Abhinav Khandelwal ([email protected])'

from common import tags
from models import custom_modules

MODULE_NAME = 'Ajax Registry Library'

# Module registration
custom_module = None


def register_module():
    """Registers this module in the registry."""

    global_routes = [
        ('/modules/ajax_registry/assets/.*', tags.ResourcesHandler)
    ]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides library to register ajax calls',
        global_routes, [])
    return custom_module
GirlsCodePy/girlscode-coursebuilder
modules/ajax_registry/registry.py
Python
gpl-3.0
1,249
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.scenario.data_processing.client_tests import base from tempest import test from tempest_lib.common.utils import data_utils class DataSourceTest(base.BaseDataProcessingTest): def _check_data_source_create(self, source_body): source_name = data_utils.rand_name('sahara-data-source') # create data source resp_body = self.create_data_source(source_name, **source_body) # check that source created successfully self.assertEqual(source_name, resp_body.name) if source_body['type'] == 'swift': source_body = self.swift_data_source self.assertDictContainsSubset(source_body, resp_body.__dict__) return resp_body.id, source_name def _check_data_source_list(self, source_id, source_name): # check for data source in list source_list = self.client.data_sources.list() sources_info = [(source.id, source.name) for source in source_list] self.assertIn((source_id, source_name), sources_info) def _check_data_source_get(self, source_id, source_name, source_body): # check data source fetch by id source = self.client.data_sources.get(source_id) self.assertEqual(source_name, source.name) self.assertDictContainsSubset(source_body, source.__dict__) def _check_data_source_delete(self, source_id): # delete data source self.client.data_sources.delete(source_id) # check that data source really deleted source_list = self.client.data_sources.list() self.assertNotIn(source_id, [source.id for source in source_list]) @test.services('data_processing') def test_swift_data_source(self): # Create extra self.swift_data_source variable to use for comparison to # data source response body because response body has no 'credentials' # field. self.swift_data_source = self.swift_data_source_with_creds.copy() del self.swift_data_source['credentials'] source_id, source_name = self._check_data_source_create( self.swift_data_source_with_creds) self._check_data_source_list(source_id, source_name) self._check_data_source_get(source_id, source_name, self.swift_data_source) self._check_data_source_delete(source_id) @test.services('data_processing') def test_local_hdfs_data_source(self): source_id, source_name = self._check_data_source_create( self.local_hdfs_data_source) self._check_data_source_list(source_id, source_name) self._check_data_source_get(source_id, source_name, self.local_hdfs_data_source) self._check_data_source_delete(source_id) @test.services('data_processing') def test_external_hdfs_data_source(self): source_id, source_name = self._check_data_source_create( self.external_hdfs_data_source) self._check_data_source_list(source_id, source_name) self._check_data_source_get(source_id, source_name, self.external_hdfs_data_source) self._check_data_source_delete(source_id)
ekasitk/sahara
sahara/tests/tempest/scenario/data_processing/client_tests/test_data_sources.py
Python
apache-2.0
3,802
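The swift/hdfs data-source bodies referenced above come from the shared test base class, which is not shown here. For orientation only, an approximate shape of those dictionaries (an assumption for illustration, not copied from base.py; field names follow the Sahara data-source API):

# Illustrative only: rough shape of the bodies the tests pass to
# create_data_source().
swift_data_source_with_creds = {
    'type': 'swift',
    'url': 'swift://sahara-container.sahara/input',
    'description': 'Test data source',
    'credentials': {'user': 'demo', 'password': 'secret'},
}

local_hdfs_data_source = {
    'type': 'hdfs',
    'url': '/user/hadoop/input',
    'description': 'Test data source',
}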
from __future__ import absolute_import from typing import Callable, Tuple, Text from django.conf import settings from diff_match_patch import diff_match_patch import platform import logging # TODO: handle changes in link hrefs def highlight_with_class(klass, text): # type: (Text, Text) -> Text return '<span class="%s">%s</span>' % (klass, text) def highlight_inserted(text): # type: (Text) -> Text return highlight_with_class('highlight_text_inserted', text) def highlight_deleted(text): # type: (Text) -> Text return highlight_with_class('highlight_text_deleted', text) def highlight_replaced(text): # type: (Text) -> Text return highlight_with_class('highlight_text_replaced', text) def chunkize(text, in_tag): # type: (Text, bool) -> Tuple[List[Tuple[Text, Text]], bool] start = 0 idx = 0 chunks = [] # type: List[Tuple[Text, Text]] for c in text: if c == '<': in_tag = True if start != idx: chunks.append(('text', text[start:idx])) start = idx elif c == '>': in_tag = False if start != idx + 1: chunks.append(('tag', text[start:idx + 1])) start = idx + 1 idx += 1 if start != idx: chunks.append(('tag' if in_tag else 'text', text[start:idx])) return chunks, in_tag def highlight_chunks(chunks, highlight_func): # type: (List[Tuple[Text, Text]], Callable[[Text], Text]) -> Text retval = u'' for type, text in chunks: if type == 'text': retval += highlight_func(text) else: retval += text return retval def verify_html(html): # type: (Text) -> bool # TODO: Actually parse the resulting HTML to ensure we don't # create mal-formed markup. This is unfortunately hard because # we both want pretty strict parsing and we want to parse html5 # fragments. For now, we do a basic sanity check. in_tag = False for c in html: if c == '<': if in_tag: return False in_tag = True elif c == '>': if not in_tag: return False in_tag = False if in_tag: return False return True def highlight_html_differences(s1, s2): # type: (Text, Text) -> Text differ = diff_match_patch() ops = differ.diff_main(s1, s2) differ.diff_cleanupSemantic(ops) retval = u'' in_tag = False idx = 0 while idx < len(ops): op, text = ops[idx] next_op = None if idx != len(ops) - 1: next_op, next_text = ops[idx + 1] if op == diff_match_patch.DIFF_DELETE and next_op == diff_match_patch.DIFF_INSERT: # Replace operation chunks, in_tag = chunkize(next_text, in_tag) retval += highlight_chunks(chunks, highlight_replaced) idx += 1 elif op == diff_match_patch.DIFF_INSERT and next_op == diff_match_patch.DIFF_DELETE: # Replace operation # I have no idea whether diff_match_patch generates inserts followed # by deletes, but it doesn't hurt to handle them chunks, in_tag = chunkize(text, in_tag) retval += highlight_chunks(chunks, highlight_replaced) idx += 1 elif op == diff_match_patch.DIFF_DELETE: retval += highlight_deleted('&nbsp;') elif op == diff_match_patch.DIFF_INSERT: chunks, in_tag = chunkize(text, in_tag) retval += highlight_chunks(chunks, highlight_inserted) elif op == diff_match_patch.DIFF_EQUAL: chunks, in_tag = chunkize(text, in_tag) retval += text idx += 1 if not verify_html(retval): from zerver.lib.actions import internal_send_message # We probably want more information here logging.getLogger('').error('HTML diff produced mal-formed HTML') if settings.ERROR_BOT is not None: subject = "HTML diff failure on %s" % (platform.node(),) internal_send_message(settings.ERROR_BOT, "stream", "errors", subject, "HTML diff produced malformed HTML") return s2 return retval
niftynei/zulip
zerver/lib/html_diff.py
Python
apache-2.0
4,261
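A short usage sketch for highlight_html_differences() above, assuming the zerver package and its diff_match_patch dependency are importable; the wrapper <span> classes come from the highlight_* helpers at the top of the module.

from zerver.lib.html_diff import highlight_html_differences

before = '<p>Hello world</p>'
after = '<p>Hello brave new world</p>'

# Inserted text comes back wrapped in <span class="highlight_text_inserted">,
# replacements use highlight_text_replaced, and deletions collapse to &nbsp;.
print(highlight_html_differences(before, after))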
"""Print 'Hello World' every two seconds, using a coroutine.""" import asyncio @asyncio.coroutine def greet_every_two_seconds(): while True: print('Hello World') yield from asyncio.sleep(2) if __name__ == '__main__': loop = asyncio.get_event_loop() try: loop.run_until_complete(greet_every_two_seconds()) finally: loop.close()
gvanrossum/asyncio
examples/hello_coroutine.py
Python
apache-2.0
380
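For comparison, a sketch of the same program written with native coroutine syntax (assumes Python 3.5+); the @asyncio.coroutine / yield from form above is the pre-3.5 spelling of the identical behavior.

import asyncio


async def greet_every_two_seconds():
    while True:
        print('Hello World')
        await asyncio.sleep(2)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(greet_every_two_seconds())
    finally:
        loop.close()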
import pandas as pd import pandas.util.testing as tm from pandas import compat from pandas.io.sas import XportReader, read_sas import numpy as np import os # CSV versions of test XPT files were obtained using the R foreign library # Numbers in a SAS xport file are always float64, so need to convert # before making comparisons. def numeric_as_float(data): for v in data.columns: if data[v].dtype is np.dtype('int64'): data[v] = data[v].astype(np.float64) class TestXport(tm.TestCase): def setUp(self): self.dirpath = tm.get_data_path() self.file01 = os.path.join(self.dirpath, "DEMO_G.XPT") self.file02 = os.path.join(self.dirpath, "SSHSV1_A.XPT") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.XPT") def test1(self): # Tests with DEMO_G.XPT (all numeric file) # Compare to this data_csv = pd.read_csv(self.file01.replace(".XPT", ".csv")) numeric_as_float(data_csv) # Read full file data = XportReader(self.file01).read() tm.assert_frame_equal(data, data_csv) # Test incremental read with `read` method. reader = XportReader(self.file01) data = reader.read(10) tm.assert_frame_equal(data, data_csv.iloc[0:10, :]) # Test incremental read with `get_chunk` method. reader = XportReader(self.file01, chunksize=10) data = reader.get_chunk() tm.assert_frame_equal(data, data_csv.iloc[0:10, :]) # Read full file with `read_sas` method data = read_sas(self.file01) tm.assert_frame_equal(data, data_csv) def test1_index(self): # Tests with DEMO_G.XPT using index (all numeric file) # Compare to this data_csv = pd.read_csv(self.file01.replace(".XPT", ".csv")) data_csv = data_csv.set_index("SEQN") numeric_as_float(data_csv) # Read full file data = XportReader(self.file01, index="SEQN").read() tm.assert_frame_equal(data, data_csv, check_index_type=False) # Test incremental read with `read` method. reader = XportReader(self.file01, index="SEQN") data = reader.read(10) tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False) # Test incremental read with `get_chunk` method. reader = XportReader(self.file01, index="SEQN", chunksize=10) data = reader.get_chunk() tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False) def test1_incremental(self): # Test with DEMO_G.XPT, reading full file incrementally data_csv = pd.read_csv(self.file01.replace(".XPT", ".csv")) data_csv = data_csv.set_index("SEQN") numeric_as_float(data_csv) reader = XportReader(self.file01, index="SEQN", chunksize=1000) all_data = [x for x in reader] data = pd.concat(all_data, axis=0) tm.assert_frame_equal(data, data_csv, check_index_type=False) def test2(self): # Test with SSHSV1_A.XPT # Compare to this data_csv = pd.read_csv(self.file02.replace(".XPT", ".csv")) numeric_as_float(data_csv) data = XportReader(self.file02).read() tm.assert_frame_equal(data, data_csv) def test3(self): # Test with DRXFCD_G.XPT (contains text and numeric variables) # Compare to this data_csv = pd.read_csv(self.file03.replace(".XPT", ".csv")) data = XportReader(self.file03).read() tm.assert_frame_equal(data, data_csv) data = read_sas(self.file03) tm.assert_frame_equal(data, data_csv)
Vvucinic/Wander
venv_2_7/lib/python2.7/site-packages/pandas/io/tests/test_sas.py
Python
artistic-2.0
3,638
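The tests above exercise two read paths for SAS transport files; this is a minimal sketch of the same patterns against a user-supplied XPT file (the path is a placeholder, not one of the test fixtures).

import pandas as pd
from pandas.io.sas import XportReader, read_sas

# Read the whole transport file at once.
df = read_sas('my_file.XPT')

# Or stream it in fixed-size chunks and reassemble, as test1_incremental does.
reader = XportReader('my_file.XPT', chunksize=1000)
df = pd.concat([chunk for chunk in reader], axis=0)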
from olympia import amo
import mkt
from mkt.webapps.models import AddonExcludedRegion


def run():
    """Unleash payments in the USA."""
    (AddonExcludedRegion.objects
     .exclude(addon__premium_type=amo.ADDON_FREE)
     .filter(region=mkt.regions.US.id).delete())
harikishen/addons-server
src/olympia/migrations/532-unleash-payments-in-usa.py
Python
bsd-3-clause
266
# # Chris Lumens <[email protected]> # # Copyright 2005, 2006, 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, modify, # copy, or redistribute it subject to the terms and conditions of the GNU # General Public License v.2. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat # trademarks that are incorporated in the source code or documentation are not # subject to the GNU General Public License and may only be used or replicated # with the express permission of Red Hat, Inc. # from pykickstart.base import * from pykickstart.errors import * from pykickstart.options import * import gettext _ = lambda x: gettext.ldgettext("pykickstart", x) class RHEL3_Mouse(KickstartCommand): removedKeywords = KickstartCommand.removedKeywords removedAttrs = KickstartCommand.removedAttrs def __init__(self, writePriority=0, *args, **kwargs): KickstartCommand.__init__(self, writePriority, *args, **kwargs) self.op = self._getParser() self.device = kwargs.get("device", "") self.emulthree = kwargs.get("emulthree", False) self.mouse = kwargs.get("mouse", "") def __str__(self): retval = KickstartCommand.__str__(self) opts = "" if self.device: opts += "--device=%s " % self.device if self.emulthree: opts += "--emulthree " if self.mouse: retval += "# System mouse\nmouse %s%s\n" % (opts, self.mouse) return retval def _getParser(self): op = KSOptionParser() op.add_option("--device", dest="device", default="") op.add_option("--emulthree", dest="emulthree", default=False, action="store_true") return op def parse(self, args): (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) self._setToSelf(self.op, opts) if len(extra) != 1: raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "mouse") self.mouse = extra[0] return self class FC3_Mouse(DeprecatedCommand): def __init__(self): DeprecatedCommand.__init__(self)
marcosbontempo/inatelos
poky-daisy/scripts/lib/mic/3rdparty/pykickstart/commands/mouse.py
Python
mit
2,610
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import json import datetime import mimetypes import os import frappe from frappe import _ import frappe.model.document import frappe.utils import frappe.sessions import werkzeug.utils from werkzeug.local import LocalProxy from werkzeug.wsgi import wrap_file from werkzeug.wrappers import Response from werkzeug.exceptions import NotFound, Forbidden def report_error(status_code): if (status_code!=404 or frappe.conf.logging) and not frappe.local.flags.disable_traceback: frappe.errprint(frappe.utils.get_traceback()) response = build_response("json") response.status_code = status_code return response def build_response(response_type=None): if "docs" in frappe.local.response and not frappe.local.response.docs: del frappe.local.response["docs"] response_type_map = { 'csv': as_csv, 'download': as_raw, 'json': as_json, 'page': as_page, 'redirect': redirect } return response_type_map[frappe.response.get('type') or response_type]() def as_csv(): response = Response() response.headers[b"Content-Type"] = b"text/csv; charset: utf-8" response.headers[b"Content-Disposition"] = ("attachment; filename=\"%s.csv\"" % frappe.response['doctype'].replace(' ', '_')).encode("utf-8") response.data = frappe.response['result'] return response def as_raw(): response = Response() response.headers[b"Content-Type"] = frappe.response.get("content_type") or mimetypes.guess_type(frappe.response['filename'])[0] or b"application/unknown" response.headers[b"Content-Disposition"] = ("filename=\"%s\"" % frappe.response['filename'].replace(' ', '_')).encode("utf-8") response.data = frappe.response['filecontent'] return response def as_json(): make_logs() response = Response() if frappe.local.response.http_status_code: response.status_code = frappe.local.response['http_status_code'] del frappe.local.response['http_status_code'] response.headers[b"Content-Type"] = b"application/json; charset: utf-8" response.data = json.dumps(frappe.local.response, default=json_handler, separators=(',',':')) return response def make_logs(response = None): """make strings for msgprint and errprint""" if not response: response = frappe.local.response if frappe.error_log: # frappe.response['exc'] = json.dumps("\n".join([cstr(d) for d in frappe.error_log])) response['exc'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.error_log]) if frappe.local.message_log: response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.message_log]) if frappe.debug_log and frappe.conf.get("logging") or False: response['_debug_messages'] = json.dumps(frappe.local.debug_log) def json_handler(obj): """serialize non-serializable data for json""" # serialize date if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)): return unicode(obj) elif isinstance(obj, LocalProxy): return unicode(obj) elif isinstance(obj, frappe.model.document.BaseDocument): doc = obj.as_dict(no_nulls=True) return doc else: raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \ (type(obj), repr(obj)) def as_page(): """print web page""" from frappe.website.render import render return render(frappe.response['page_name'], http_status_code=frappe.response.get("http_status_code")) def redirect(): return werkzeug.utils.redirect(frappe.response.location) def download_backup(path): try: frappe.only_for(("System Manager", "Administrator")) except frappe.PermissionError: raise 
Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups.")) return send_private_file(path) def send_private_file(path): path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/")) if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'): path = '/' + path response = Response() response.headers[b'X-Accel-Redirect'] = path else: filename = os.path.basename(path) filepath = frappe.utils.get_site_path(path) try: f = open(filepath, 'rb') except IOError: raise NotFound response = Response(wrap_file(frappe.local.request.environ, f)) response.headers.add(b'Content-Disposition', 'attachment', filename=filename.encode("utf-8")) response.headers[b'Content-Type'] = mimetypes.guess_type(filename)[0] or b'application/octet-stream' return response def handle_session_stopped(): response = Response("""<html> <body style="background-color: #EEE;"> <h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto"> Updating. We will be back in a few moments... </h3> </body> </html>""") response.status_code = 503 response.content_type = 'text/html' return response
gangadharkadam/saloon_frappe
frappe/utils/response.py
Python
mit
4,943
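A small illustration of the json_handler() fallback defined above (assuming the Python 2 frappe environment this module targets, so that the import succeeds): it lets json.dumps serialize dates and datetimes that the stock encoder rejects.

import json
import datetime
from frappe.utils.response import json_handler

payload = {'posting_date': datetime.date(2015, 1, 1),
           'modified': datetime.datetime(2015, 1, 1, 9, 30)}
# Dates fall through json_handler, which stringifies them.
print(json.dumps(payload, default=json_handler))
# -> {"posting_date": "2015-01-01", "modified": "2015-01-01 09:30:00"}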
# # Chris Lumens <[email protected]> # # Copyright 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, modify, # copy, or redistribute it subject to the terms and conditions of the GNU # General Public License v.2. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat # trademarks that are incorporated in the source code or documentation are not # subject to the GNU General Public License and may only be used or replicated # with the express permission of Red Hat, Inc. # from pykickstart.base import * from pykickstart.options import * class FC3_Bootloader(KickstartCommand): removedKeywords = KickstartCommand.removedKeywords removedAttrs = KickstartCommand.removedAttrs def __init__(self, writePriority=10, *args, **kwargs): KickstartCommand.__init__(self, writePriority, *args, **kwargs) self.op = self._getParser() self.driveorder = kwargs.get("driveorder", []) self.appendLine = kwargs.get("appendLine", "") self.forceLBA = kwargs.get("forceLBA", False) self.linear = kwargs.get("linear", True) self.location = kwargs.get("location", "") self.md5pass = kwargs.get("md5pass", "") self.password = kwargs.get("password", "") self.upgrade = kwargs.get("upgrade", False) self.useLilo = kwargs.get("useLilo", False) self.deleteRemovedAttrs() def _getArgsAsStr(self): retval = "" if self.appendLine != "": retval += " --append=\"%s\"" % self.appendLine if self.linear: retval += " --linear" if self.location: retval += " --location=%s" % self.location if hasattr(self, "forceLBA") and self.forceLBA: retval += " --lba32" if self.password != "": retval += " --password=\"%s\"" % self.password if self.md5pass != "": retval += " --md5pass=\"%s\"" % self.md5pass if self.upgrade: retval += " --upgrade" if self.useLilo: retval += " --useLilo" if len(self.driveorder) > 0: retval += " --driveorder=\"%s\"" % ",".join(self.driveorder) return retval def __str__(self): retval = KickstartCommand.__str__(self) if self.location != "": retval += "# System bootloader configuration\nbootloader" retval += self._getArgsAsStr() + "\n" return retval def _getParser(self): def driveorder_cb (option, opt_str, value, parser): for d in value.split(','): parser.values.ensure_value(option.dest, []).append(d) op = KSOptionParser() op.add_option("--append", dest="appendLine") op.add_option("--linear", dest="linear", action="store_true", default=True) op.add_option("--nolinear", dest="linear", action="store_false") op.add_option("--location", dest="location", type="choice", default="mbr", choices=["mbr", "partition", "none", "boot"]) op.add_option("--lba32", dest="forceLBA", action="store_true", default=False) op.add_option("--password", dest="password", default="") op.add_option("--md5pass", dest="md5pass", default="") op.add_option("--upgrade", dest="upgrade", action="store_true", default=False) op.add_option("--useLilo", dest="useLilo", action="store_true", default=False) op.add_option("--driveorder", dest="driveorder", action="callback", callback=driveorder_cb, nargs=1, type="string") return op def parse(self, args): (opts, extra) = self.op.parse_args(args=args, 
lineno=self.lineno) self._setToSelf(self.op, opts) if self.currentCmd == "lilo": self.useLilo = True return self class FC4_Bootloader(FC3_Bootloader): removedKeywords = FC3_Bootloader.removedKeywords + ["linear", "useLilo"] removedAttrs = FC3_Bootloader.removedAttrs + ["linear", "useLilo"] def __init__(self, writePriority=10, *args, **kwargs): FC3_Bootloader.__init__(self, writePriority, *args, **kwargs) def _getArgsAsStr(self): retval = "" if self.appendLine != "": retval += " --append=\"%s\"" % self.appendLine if self.location: retval += " --location=%s" % self.location if hasattr(self, "forceLBA") and self.forceLBA: retval += " --lba32" if self.password != "": retval += " --password=\"%s\"" % self.password if self.md5pass != "": retval += " --md5pass=\"%s\"" % self.md5pass if self.upgrade: retval += " --upgrade" if len(self.driveorder) > 0: retval += " --driveorder=\"%s\"" % ",".join(self.driveorder) return retval def _getParser(self): op = FC3_Bootloader._getParser(self) op.remove_option("--linear") op.remove_option("--nolinear") op.remove_option("--useLilo") return op def parse(self, args): (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) self._setToSelf(self.op, opts) return self class F8_Bootloader(FC4_Bootloader): removedKeywords = FC4_Bootloader.removedKeywords removedAttrs = FC4_Bootloader.removedAttrs def __init__(self, writePriority=10, *args, **kwargs): FC4_Bootloader.__init__(self, writePriority, *args, **kwargs) self.timeout = kwargs.get("timeout", None) self.default = kwargs.get("default", "") def _getArgsAsStr(self): ret = FC4_Bootloader._getArgsAsStr(self) if self.timeout is not None: ret += " --timeout=%d" %(self.timeout,) if self.default: ret += " --default=%s" %(self.default,) return ret def _getParser(self): op = FC4_Bootloader._getParser(self) op.add_option("--timeout", dest="timeout", type="int") op.add_option("--default", dest="default") return op class F12_Bootloader(F8_Bootloader): removedKeywords = F8_Bootloader.removedKeywords removedAttrs = F8_Bootloader.removedAttrs def _getParser(self): op = F8_Bootloader._getParser(self) op.add_option("--lba32", dest="forceLBA", deprecated=1, action="store_true") return op class F14_Bootloader(F12_Bootloader): removedKeywords = F12_Bootloader.removedKeywords + ["forceLBA"] removedAttrs = F12_Bootloader.removedKeywords + ["forceLBA"] def _getParser(self): op = F12_Bootloader._getParser(self) op.remove_option("--lba32") return op class F15_Bootloader(F14_Bootloader): removedKeywords = F14_Bootloader.removedKeywords removedAttrs = F14_Bootloader.removedAttrs def __init__(self, writePriority=10, *args, **kwargs): F14_Bootloader.__init__(self, writePriority, *args, **kwargs) self.isCrypted = kwargs.get("isCrypted", False) def _getArgsAsStr(self): ret = F14_Bootloader._getArgsAsStr(self) if self.isCrypted: ret += " --iscrypted" return ret def _getParser(self): def password_cb(option, opt_str, value, parser): parser.values.isCrypted = True parser.values.password = value op = F14_Bootloader._getParser(self) op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False) op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string") return op class RHEL5_Bootloader(FC4_Bootloader): removedKeywords = FC4_Bootloader.removedKeywords removedAttrs = FC4_Bootloader.removedAttrs def __init__(self, writePriority=10, *args, **kwargs): FC4_Bootloader.__init__(self, writePriority, *args, **kwargs) self.hvArgs = kwargs.get("hvArgs", "") def _getArgsAsStr(self): ret = 
FC4_Bootloader._getArgsAsStr(self) if self.hvArgs: ret += " --hvargs=\"%s\"" %(self.hvArgs,) return ret def _getParser(self): op = FC4_Bootloader._getParser(self) op.add_option("--hvargs", dest="hvArgs", type="string") return op class RHEL6_Bootloader(F12_Bootloader): removedKeywords = F12_Bootloader.removedKeywords removedAttrs = F12_Bootloader.removedAttrs def __init__(self, writePriority=10, *args, **kwargs): F12_Bootloader.__init__(self, writePriority, *args, **kwargs) self.isCrypted = kwargs.get("isCrypted", False) def _getArgsAsStr(self): ret = F12_Bootloader._getArgsAsStr(self) if self.isCrypted: ret += " --iscrypted" return ret def _getParser(self): def password_cb(option, opt_str, value, parser): parser.values.isCrypted = True parser.values.password = value op = F12_Bootloader._getParser(self) op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False) op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string") return op
marcosbontempo/inatelos
poky-daisy/scripts/lib/mic/3rdparty/pykickstart/commands/bootloader.py
Python
mit
9,658
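# Hedged illustration (not part of pykickstart): the driveorder_cb callback in
# FC3_Bootloader._getParser above accumulates comma-separated values across repeated
# --driveorder flags via optparse's Values.ensure_value(). Below is a minimal
# standalone sketch of that pattern using only the stdlib optparse module; the helper
# name and the sample drive names are invented for demonstration.
from optparse import OptionParser

def _accumulate_cb(option, opt_str, value, parser):
    # Split "a,b,c" and append each item to a list created on first use.
    for item in value.split(','):
        parser.values.ensure_value(option.dest, []).append(item)

_op = OptionParser()
_op.add_option("--driveorder", dest="driveorder", action="callback",
               callback=_accumulate_cb, nargs=1, type="string")
_opts, _args = _op.parse_args(["--driveorder=sda,sdb", "--driveorder", "sdc"])
assert _opts.driveorder == ['sda', 'sdb', 'sdc']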
# -*- coding: utf-8 -*-

'''
    Specto Add-on
    Copyright (C) 2015 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''


import re,urllib,urlparse,json,base64

from resources.lib.libraries import control
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib import resolvers


class source:
    def __init__(self):
        self.base_link = 'http://directdownload.tv'
        self.search_link = 'L2FwaT9rZXk9NEIwQkI4NjJGMjRDOEEyOSZxdWFsaXR5W109SERUViZxdWFsaXR5W109RFZEUklQJnF1YWxpdHlbXT03MjBQJnF1YWxpdHlbXT1XRUJETCZxdWFsaXR5W109V0VCREwxMDgwUCZsaW1pdD0yMCZrZXl3b3JkPQ=='

    def get_show(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = tvshowtitle
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            url = '%s S%02dE%02d' % (url, int(season), int(episode))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            if (control.setting('realdedrid_user') == '' and control.setting('premiumize_user') == ''): raise Exception()

            query = base64.urlsafe_b64decode(self.search_link) + urllib.quote_plus(url)
            query = urlparse.urljoin(self.base_link, query)

            result = client.request(query)
            result = json.loads(result)

            title, hdlr = re.compile('(.+?) (S\d*E\d*)$').findall(url)[0]
            title = cleantitle.tv(title)
            hdlr = [hdlr]

            links = []

            for i in result:
                try:
                    t = i['showName']
                    t = client.replaceHTMLCodes(t)
                    t = cleantitle.tv(t)
                    if not t == title: raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in hdlr): raise Exception()

                    quality = i['quality']
                    if quality == 'WEBDL1080P': quality = '1080p'
                    elif quality in ['720P', 'WEBDL']: quality = 'HD'
                    else: quality = 'SD'

                    size = i['size']
                    size = float(size)/1024
                    info = '%.2f GB' % size

                    url = i['links']
                    for x in url.keys(): links.append({'url': url[x], 'quality': quality, 'info': info})
                except:
                    pass

            for i in links:
                try:
                    url = i['url']
                    if len(url) > 1: raise Exception()
                    url = url[0]

                    host = (urlparse.urlparse(url).netloc).replace('www.', '').rsplit('.', 1)[0].lower()
                    if not host in hosthdDict: raise Exception()

                    sources.append({'source': host, 'quality': i['quality'], 'provider': 'DirectDL', 'url': url, 'info': i['info']})
                except:
                    pass

            return sources
        except:
            return sources

    def resolve(self, url):
        try:
            url = resolvers.request(url)
            return url
        except:
            return
felipenaselva/repo.felipe
plugin.video.specto/resources/lib/sources/disabled/directdl_tv.py
Python
gpl-2.0
4,247
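# Hedged illustration (not part of the Specto add-on): two stdlib techniques used by
# get_sources() above -- splitting a "<show title> SxxEyy" query with a regex, and
# decoding the urlsafe-base64 search path. The sample strings here are invented;
# only re and base64 are required.
import re
import base64

_query = 'Some Show S01E02'
_title, _hdlr = re.compile(r'(.+?) (S\d*E\d*)$').findall(_query)[0]
assert (_title, _hdlr) == ('Some Show', 'S01E02')

# search_link stores a URL path as urlsafe base64; decoding mirrors the add-on
# (the value below encodes '/api?example=1' purely for demonstration).
_encoded = base64.urlsafe_b64encode(b'/api?example=1')
assert base64.urlsafe_b64decode(_encoded) == b'/api?example=1'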
"""Ansible integration test infrastructure.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import contextlib import json import os import shutil import tempfile from .. import types as t from ..target import ( analyze_integration_target_dependencies, walk_integration_targets, ) from ..config import ( IntegrationConfig, NetworkIntegrationConfig, PosixIntegrationConfig, WindowsIntegrationConfig, ) from ..util import ( ApplicationError, display, make_dirs, COVERAGE_CONFIG_NAME, MODE_DIRECTORY, MODE_DIRECTORY_WRITE, MODE_FILE, to_bytes, ) from ..util_common import ( named_temporary_file, write_text_file, ResultType, ) from ..coverage_util import ( generate_coverage_config, ) from ..cache import ( CommonCache, ) from ..cloud import ( CloudEnvironmentConfig, ) from ..data import ( data_context, ) def setup_common_temp_dir(args, path): """ :type args: IntegrationConfig :type path: str """ if args.explain: return os.mkdir(path) os.chmod(path, MODE_DIRECTORY) if args.coverage: coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME) coverage_config = generate_coverage_config(args) write_text_file(coverage_config_path, coverage_config) os.chmod(coverage_config_path, MODE_FILE) coverage_output_path = os.path.join(path, ResultType.COVERAGE.name) os.mkdir(coverage_output_path) os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE) def generate_dependency_map(integration_targets): """ :type integration_targets: list[IntegrationTarget] :rtype: dict[str, set[IntegrationTarget]] """ targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map def get_files_needed(target_dependencies): """ :type target_dependencies: list[IntegrationTarget] :rtype: list[str] """ files_needed = [] for target_dependency in target_dependencies: files_needed += target_dependency.needs_file files_needed = sorted(set(files_needed)) invalid_paths = [path for path in files_needed if not os.path.isfile(path)] if invalid_paths: raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths)) return files_needed def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None """Check the given inventory for issues.""" if args.docker or args.remote: if os.path.exists(inventory_path): with open(inventory_path) as inventory_file: inventory = inventory_file.read() if 'ansible_ssh_private_key_file' in inventory: display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.') def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str """Return the inventory path used for the given integration configuration relative to the content root.""" inventory_names = { PosixIntegrationConfig: 'inventory', WindowsIntegrationConfig: 'inventory.winrm', NetworkIntegrationConfig: 'inventory.networking', } # type: t.Dict[t.Type[IntegrationConfig], str] return 
os.path.join(data_context().content.integration_path, inventory_names[type(args)]) def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None """Make the given inventory available during delegation.""" if isinstance(args, PosixIntegrationConfig): return def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None """ Add the inventory file to the payload file list. This will preserve the file during delegation even if it is ignored or is outside the content and install roots. """ if data_context().content.collection: working_path = data_context().content.collection.directory else: working_path = '' inventory_path = os.path.join(working_path, get_inventory_relative_path(args)) if os.path.isfile(inventory_path_src) and os.path.relpath(inventory_path_src, data_context().content.root) != inventory_path: originals = [item for item in files if item[1] == inventory_path] if originals: for original in originals: files.remove(original) display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src)) else: display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src)) files.append((inventory_path_src, inventory_path)) data_context().register_payload_callback(inventory_callback) @contextlib.contextmanager def integration_test_environment(args, target, inventory_path_src): """ :type args: IntegrationConfig :type target: IntegrationTarget :type inventory_path_src: str """ ansible_config_src = args.get_ansible_config() ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command) if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases: display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.') integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path) targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path) inventory_path = inventory_path_src ansible_config = ansible_config_src vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path) yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file) return # When testing a collection, the temporary directory must reside within the collection. # This is necessary to enable support for the default collection for non-collection content (playbooks and roles). 
root_temp_dir = os.path.join(ResultType.TMP.path, 'integration') prefix = '%s-' % target.name suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8' if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases: display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.') suffix = '-ansible' if args.explain: temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix)) else: make_dirs(root_temp_dir) temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir) try: display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2) inventory_relative_path = get_inventory_relative_path(args) inventory_path = os.path.join(temp_dir, inventory_relative_path) cache = IntegrationCache(args) target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set()))) files_needed = get_files_needed(target_dependencies) integration_dir = os.path.join(temp_dir, data_context().content.integration_path) targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path) ansible_config = os.path.join(temp_dir, ansible_config_relative) vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path) vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path) file_copies = [ (ansible_config_src, ansible_config), (inventory_path_src, inventory_path), ] if os.path.exists(vars_file_src): file_copies.append((vars_file_src, vars_file)) file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed] integration_targets_relative_path = data_context().content.integration_targets_path directory_copies = [ ( os.path.join(integration_targets_relative_path, target.relative_path), os.path.join(temp_dir, integration_targets_relative_path, target.relative_path) ) for target in target_dependencies ] directory_copies = sorted(set(directory_copies)) file_copies = sorted(set(file_copies)) if not args.explain: make_dirs(integration_dir) for dir_src, dir_dst in directory_copies: display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2) if not args.explain: shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) for file_src, file_dst in file_copies: display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2) if not args.explain: make_dirs(os.path.dirname(file_dst)) shutil.copy2(file_src, file_dst) yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file) finally: if not args.explain: shutil.rmtree(temp_dir) @contextlib.contextmanager def integration_test_config_file(args, env_config, integration_dir): """ :type args: IntegrationConfig :type env_config: CloudEnvironmentConfig :type integration_dir: str """ if not env_config: yield None return config_vars = (env_config.ansible_vars or {}).copy() config_vars.update(dict( ansible_test=dict( environment=env_config.env_vars, module_defaults=env_config.module_defaults, ) )) config_file = json.dumps(config_vars, indent=4, sort_keys=True) with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: filename = os.path.relpath(path, integration_dir) display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3) yield path class IntegrationEnvironment: """Details about the integration environment.""" def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file): self.integration_dir = integration_dir 
self.targets_dir = targets_dir self.inventory_path = inventory_path self.ansible_config = ansible_config self.vars_file = vars_file class IntegrationCache(CommonCache): """Integration cache.""" @property def integration_targets(self): """ :rtype: list[IntegrationTarget] """ return self.get('integration_targets', lambda: list(walk_integration_targets())) @property def dependency_map(self): """ :rtype: dict[str, set[IntegrationTarget]] """ return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
kvar/ansible
test/lib/ansible_test/_internal/integration/__init__.py
Python
gpl-3.0
11,813
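# Hedged sketch (not ansible-test code): generate_dependency_map() above inverts a
# "dependency -> dependents" mapping into "dependent -> {dependency targets}" and
# rejects dependencies that are not known targets. A minimal standalone version of
# that inversion, using plain strings instead of IntegrationTarget objects; the
# function and sample names are invented for demonstration.
def build_dependency_map(target_names, target_dependencies):
    """target_names: iterable of known target names.
    target_dependencies: dict mapping dependency name -> set of dependent names."""
    known = set(target_names)
    dependency_map = {}
    invalid = set()

    for dependency, dependents in target_dependencies.items():
        if dependency not in known:
            invalid.add(dependency)
            continue
        for dependent in dependents:
            dependency_map.setdefault(dependent, set()).add(dependency)

    if invalid:
        raise ValueError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid)))

    return dependency_map

# Example: target "setup_db" is needed by both "api" and "web".
assert build_dependency_map(['setup_db', 'api', 'web'], {'setup_db': {'api', 'web'}}) == \
    {'api': {'setup_db'}, 'web': {'setup_db'}}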
"""Tests for the DirecTV integration.""" from homeassistant.components.directv.const import DOMAIN from homeassistant.config_entries import ConfigEntryState from homeassistant.core import HomeAssistant from tests.components.directv import setup_integration from tests.test_util.aiohttp import AiohttpClientMocker # pylint: disable=redefined-outer-name async def test_config_entry_not_ready( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test the DirecTV configuration entry not ready.""" entry = await setup_integration(hass, aioclient_mock, setup_error=True) assert entry.state is ConfigEntryState.SETUP_RETRY async def test_unload_config_entry( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test the DirecTV configuration entry unloading.""" entry = await setup_integration(hass, aioclient_mock) assert entry.entry_id in hass.data[DOMAIN] assert entry.state is ConfigEntryState.LOADED await hass.config_entries.async_unload(entry.entry_id) await hass.async_block_till_done() assert entry.entry_id not in hass.data[DOMAIN] assert entry.state is ConfigEntryState.NOT_LOADED
Danielhiversen/home-assistant
tests/components/directv/test_init.py
Python
apache-2.0
1,186
""" Plot spherical harmonics on the surface of the sphere, as well as a 3D polar plot. This example requires scipy. In this example we use the mlab's mesh function: :func:`mayavi.mlab.mesh`. For plotting surfaces this is a very versatile function. The surfaces can be defined as functions of a 2D grid. For each spherical harmonic, we plot its value on the surface of a sphere, and then in polar. The polar plot is simply obtained by varying the radius of the previous sphere. """ # Author: Gael Varoquaux <[email protected]> # Copyright (c) 2008, Enthought, Inc. # License: BSD Style. from mayavi import mlab import numpy as np from scipy.special import sph_harm # Create a sphere r = 0.3 pi = np.pi cos = np.cos sin = np.sin phi, theta = np.mgrid[0:pi:101j, 0:2 * pi:101j] x = r * sin(phi) * cos(theta) y = r * sin(phi) * sin(theta) z = r * cos(phi) mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300)) mlab.clf() # Represent spherical harmonics on the surface of the sphere for n in range(1, 6): for m in range(n): s = sph_harm(m, n, theta, phi).real mlab.mesh(x - m, y - n, z, scalars=s, colormap='jet') s[s < 0] *= 0.97 s /= s.max() mlab.mesh(s * x - m, s * y - n, s * z + 1.3, scalars=s, colormap='Spectral') mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25)) mlab.show()
dmsurti/mayavi
examples/mayavi/mlab/spherical_harmonics.py
Python
bsd-3-clause
1,373
from __future__ import unicode_literals

import os
from subprocess import PIPE, Popen
import sys

from django.utils.encoding import force_text, DEFAULT_LOCALE_ENCODING
from django.utils import six

from .base import CommandError


def popen_wrapper(args, os_err_exc_type=CommandError):
    """
    Friendly wrapper around Popen.

    Returns stdout output, stderr output and OS status code.
    """
    try:
        p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE,
                  close_fds=os.name != 'nt', universal_newlines=True)
    except OSError as e:
        strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING,
                              strings_only=True)
        six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' %
                    (args[0], strerror)), sys.exc_info()[2])
    output, errors = p.communicate()
    return (
        output,
        force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True),
        p.returncode
    )


def handle_extensions(extensions=('html',), ignored=('py',)):
    """
    Organizes multiple extensions that are separated with commas or passed by
    using --extension/-e multiple times.

    Note that the .py extension is ignored here because of the way
    non-*.py files are handled in make_messages() (they are copied to
    file.ext.py files to trick xgettext to parse them as Python files).

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml']

    >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
    {'.html', '.js'}
    >>> handle_extensions(['.html, txt,.tpl'])
    {'.html', '.tpl', '.txt'}
    """
    ext_list = []
    for ext in extensions:
        ext_list.extend(ext.replace(' ', '').split(','))
    for i, ext in enumerate(ext_list):
        if not ext.startswith('.'):
            ext_list[i] = '.%s' % ext_list[i]
    return set(x for x in ext_list if x.strip('.') not in ignored)


def find_command(cmd, path=None, pathext=None):
    if path is None:
        path = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(path, six.string_types):
        path = [path]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # don't use extensions if the command ends with one of them
    for ext in pathext:
        if cmd.endswith(ext):
            pathext = ['']
            break
    # check if we find the command on PATH
    for p in path:
        f = os.path.join(p, cmd)
        if os.path.isfile(f):
            return f
        for ext in pathext:
            fext = f + ext
            if os.path.isfile(fext):
                return fext
    return None
simbha/mAngE-Gin
lib/django/core/management/utils.py
Python
mit
2,822
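# Hedged usage sketch (not part of the module above): how these helpers are typically
# called. popen_wrapper() returns a (stdout, stderr, returncode) tuple, find_command()
# walks PATH (plus PATHEXT on Windows), and handle_extensions() normalises extension
# lists. The 'xgettext' command is only an example program name.
from django.core.management.utils import popen_wrapper, find_command, handle_extensions

if find_command('xgettext') is not None:          # None when not on PATH
    out, err, status = popen_wrapper(['xgettext', '--version'])
    if status != 0:
        print('xgettext failed: %s' % err)

assert handle_extensions(['html,txt', '.xml']) == {'.html', '.txt', '.xml'}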
""" gmagoon 05/03/10: new class for MM4 parsing, based on mopacparser.py, which, in turn, is based on gaussianparser.py from cclib, described below: cclib (http://cclib.sf.net) is (c) 2006, the cclib development team and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). """ __revision__ = "$Revision: 814 $" #import re import numpy import math import utils import logfileparser def symbol2int(symbol): t = utils.PeriodicTable() return t.number[symbol] class MM4(logfileparser.Logfile): """An MM4 output file.""" def __init__(self, *args, **kwargs): # Call the __init__ method of the superclass super(MM4, self).__init__(logname="MM4", *args, **kwargs) def __str__(self): """Return a string representation of the object.""" return "MM4 log file %s" % (self.filename) def __repr__(self): """Return a representation of the object.""" return 'MM4("%s")' % (self.filename) def extract(self, inputfile, line): """Extract information from the file object inputfile.""" # Number of atoms. # Example: THE COORDINATES OF 20 ATOMS ARE READ IN. if line[0:28] == ' THE COORDINATES OF': self.updateprogress(inputfile, "Attributes", self.fupdate) natom = int(line.split()[-5]) #fifth to last component should be number of atoms if hasattr(self, "natom"): assert self.natom == natom else: self.natom = natom # Extract the atomic numbers and coordinates from the optimized (final) geometry # Example: # FINAL ATOMIC COORDINATE # ATOM X Y Z TYPE # C( 1) -3.21470 -0.22058 0.00000 ( 1) # H( 2) -3.30991 -0.87175 0.89724 ( 5) # H( 3) -3.30991 -0.87174 -0.89724 ( 5) # H( 4) -4.08456 0.47380 0.00000 ( 5) # C( 5) -1.88672 0.54893 0.00000 ( 1) # H( 6) -1.84759 1.21197 -0.89488 ( 5) # H( 7) -1.84759 1.21197 0.89488 ( 5) # C( 8) -0.66560 -0.38447 0.00000 ( 1) # H( 9) -0.70910 -1.04707 -0.89471 ( 5) # H( 10) -0.70910 -1.04707 0.89471 ( 5) # C( 11) 0.66560 0.38447 0.00000 ( 1) # H( 12) 0.70910 1.04707 0.89471 ( 5) # H( 13) 0.70910 1.04707 -0.89471 ( 5) # C( 14) 1.88672 -0.54893 0.00000 ( 1) # H( 15) 1.84759 -1.21197 -0.89488 ( 5) # H( 16) 1.84759 -1.21197 0.89488 ( 5) # C( 17) 3.21470 0.22058 0.00000 ( 1) # H( 18) 3.30991 0.87174 0.89724 ( 5) # H( 19) 4.08456 -0.47380 0.00000 ( 5) # H( 20) 3.30991 0.87175 -0.89724 ( 5) if line[0:29] == ' FINAL ATOMIC COORDINATE': self.updateprogress(inputfile, "Attributes", self.cupdate) self.inputcoords = [] self.inputatoms = [] headerline = inputfile.next() atomcoords = [] line = inputfile.next() while len(line.split()) > 0: broken = line.split() self.inputatoms.append(symbol2int(line[0:10].strip())) xc = float(line[17:29]) yc = float(line[29:41]) zc = float(line[41:53]) atomcoords.append([xc,yc,zc]) line = inputfile.next() self.inputcoords.append(atomcoords) if not hasattr(self, "atomnos"): self.atomnos = numpy.array(self.inputatoms, 'i') if not hasattr(self, "natom"): self.natom = len(self.atomnos) #read energy (in kcal/mol, converted to eV) # Example: HEAT OF FORMATION (HFN) AT 298.2 K = -42.51 KCAL/MOLE if line[0:31] == ' HEAT OF FORMATION (HFN) AT': if not hasattr(self, "scfenergies"): self.scfenergies = [] self.scfenergies.append(utils.convertor(self.float(line.split()[-2])/627.5095, "hartree", "eV")) #note conversion from kcal/mol to hartree #molecular mass parsing (units will be amu); note that this can occur multiple times in the file, but all values should be the same #Example: FORMULA WEIGHT : 86.112 if line[0:33] == ' FORMULA WEIGHT :': self.updateprogress(inputfile, "Attributes", self.fupdate) molmass = self.float(line.split()[-1]) if hasattr(self, "molmass"): assert 
self.molmass == molmass #check that subsequent occurences match the original value else: self.molmass = molmass #rotational constants (converted to GHZ) #Example: # THE MOMENTS OF INERTIA CALCULATED FROM R(g), R(z) VALUES # (also from R(e), R(alpha), R(s) VALUES) # # Note: (1) All calculations are based on principle isotopes. # (2) R(z) values include harmonic vibration (Coriolis) # contribution indicated in parentheses. # # # (1) UNIT = 10**(-39) GM*CM**2 # # IX IY IZ # # R(e) 5.7724 73.4297 76.0735 # R(z) 5.7221(-0.0518) 74.0311(-0.0285) 76.7102(-0.0064) # # (2) UNIT = AU A**2 # # IX IY IZ # # R(e) 34.7661 442.2527 458.1757 # R(z) 34.4633(-0.3117) 445.8746(-0.1714) 462.0104(-0.0385) #moments of inertia converted into rotational constants via rot cons= h/(8*Pi^2*I) #we will use the equilibrium values (R(e)) in units of 10**-39 GM*CM**2 (these units are less precise (fewer digits) than AU A**2 units but it is simpler as it doesn't require use of Avogadro's number #***even R(e) may include temperature dependent effects, though, and maybe the one I actually want is r(mm4) (not reported) if line[0:33] == ' (1) UNIT = 10**(-39) GM*CM**2': dummyline = inputfile.next(); dummyline = inputfile.next(); dummyline = inputfile.next(); rotinfo=inputfile.next(); if not hasattr(self, "rotcons"): self.rotcons = [] broken = rotinfo.split() h = 6.62606896E3 #Planck constant in 10^-37 J-s = 10^-37 kg m^2/s cf. http://physics.nist.gov/cgi-bin/cuu/Value?h#mid a = h/(8*math.pi*math.pi*float(broken[1])) b = h/(8*math.pi*math.pi*float(broken[2])) c = h/(8*math.pi*math.pi*float(broken[3])) self.rotcons.append([a, b, c]) # Start of the IR/Raman frequency section. #Example: #0 FUNDAMENTAL NORMAL VIBRATIONAL FREQUENCIES # ( THEORETICALLY 54 VALUES ) # # Frequency : in 1/cm # A(i) : IR intensity (vs,s,m,w,vw,-- or in 10**6 cm/mole) # A(i) = -- : IR inactive # # # no Frequency Symmetry A(i) # # 1. 2969.6 (Bu ) s # 2. 2969.6 (Bu ) w # 3. 2967.6 (Bu ) w # 4. 2967.6 (Bu ) s # 5. 2931.2 (Au ) vs # 6. 2927.8 (Bg ) -- # 7. 2924.9 (Au ) m # 8. 2923.6 (Bg ) -- # 9. 2885.8 (Ag ) -- # 10. 2883.9 (Bu ) w # 11. 2879.8 (Ag ) -- # 12. 2874.6 (Bu ) w # 13. 2869.6 (Ag ) -- # 14. 2869.2 (Bu ) s # 15. 1554.4 (Ag ) -- # 16. 1494.3 (Bu ) w # 17. 1449.7 (Bg ) -- # 18. 1449.5 (Au ) w # 19. 1444.8 (Ag ) -- # 20. 1438.5 (Bu ) w # 21. 1421.5 (Ag ) -- # 22. 1419.3 (Ag ) -- # 23. 1416.5 (Bu ) w # 24. 1398.8 (Bu ) w # 25. 1383.9 (Ag ) -- # 26. 1363.7 (Bu ) m # 27. 1346.3 (Ag ) -- # 28. 1300.2 (Au ) vw # 29. 1298.7 (Bg ) -- # 30. 1283.4 (Bu ) m # 31. 1267.4 (Bg ) -- # 32. 1209.6 (Au ) w # 33. 1132.2 (Bg ) -- # 34. 1094.4 (Ag ) -- # 35. 1063.4 (Bu ) w # 36. 1017.8 (Bu ) w # 37. 1011.6 (Ag ) -- # 38. 1004.2 (Au ) w # 39. 990.2 (Ag ) -- # 40. 901.8 (Ag ) -- # 41. 898.4 (Bg ) -- # 42. 875.9 (Bu ) w # 43. 795.4 (Au ) w # 44. 725.0 (Bg ) -- # 45. 699.6 (Au ) w # 46. 453.4 (Bu ) w # 47. 352.1 (Ag ) -- # 48. 291.1 (Ag ) -- # 49. 235.9 (Au ) vw # 50. 225.2 (Bg ) -- # 51. 151.6 (Bg ) -- # 52. 147.7 (Bu ) w # 53. 108.0 (Au ) vw # 54. 77.1 (Au ) vw # 55. ( 0.0) (t/r ) # 56. ( 0.0) (t/r ) # 57. ( 0.0) (t/r ) # 58. ( 0.0) (t/r ) # 59. ( 0.0) (t/r ) # 60. 
( 0.0) (t/r ) if line[0:52] == ' no Frequency Symmetry A(i)': blankline = inputfile.next() self.updateprogress(inputfile, "Frequency Information", self.fupdate) if not hasattr(self, 'vibfreqs'): self.vibfreqs = [] line = inputfile.next() while(line[15:31].find('(') < 0):#terminate once we reach zero frequencies (which include parentheses) freq = self.float(line[15:31]) self.vibfreqs.append(freq) line = inputfile.next() #parsing of final steric energy in eV (for purposes of providing a baseline for possible subsequent hindered rotor calculations) #example line:" FINAL STERIC ENERGY IS 0.8063 KCAL/MOL." if line[6:28] == 'FINAL STERIC ENERGY IS': stericenergy = utils.convertor(self.float(line.split()[4])/627.5095, "hartree", "eV") #note conversion from kcal/mol to hartree if hasattr(self, "stericenergy"): assert self.stericenergy == stericenergy #check that subsequent occurences match the original value else: self.stericenergy = stericenergy if __name__ == "__main__": import doctest, mm4parser doctest.testmod(mm4parser, verbose=False)
faribas/RMG-Java
source/cclib/parser/mm4parser.py
Python
mit
11,929
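# Hedged worked example (not part of the MM4 parser): the rotational-constant
# conversion used above is B = h / (8 * pi**2 * I). With the moment of inertia I in
# units of 10**-39 g*cm**2 and Planck's constant written as 6.62606896E3 (i.e. in
# units of 10**-37 J*s), the quotient comes out directly in GHz. The moment of
# inertia below is the IX R(e) value from the sample output quoted in the comments.
import math

h = 6.62606896E3          # Planck constant, in units of 10**-37 J*s
I_x = 5.7724              # moment of inertia, in units of 10**-39 g*cm**2

B_x = h / (8 * math.pi ** 2 * I_x)
print('%.2f GHz' % B_x)   # ~14.54 GHz
# (Energies are handled similarly: kcal/mol is divided by 627.5095 to obtain hartree
#  before the hartree -> eV conversion.)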
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.net.proto import ProtocolBuffer import array import dummy_thread as thread __pychecker__ = """maxreturns=0 maxbranches=0 no-callinit unusednames=printElemNumber,debug_strs no-special""" if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'): _extension_runtime = True _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage else: _extension_runtime = False _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage from google.appengine.datastore.entity_pb import EntityProto class SchemaEntry(ProtocolBuffer.ProtocolMessage): STRING = 1 INT32 = 2 BOOLEAN = 3 DOUBLE = 4 POINT = 5 USER = 6 REFERENCE = 7 _Type_NAMES = { 1: "STRING", 2: "INT32", 3: "BOOLEAN", 4: "DOUBLE", 5: "POINT", 6: "USER", 7: "REFERENCE", } def Type_Name(cls, x): return cls._Type_NAMES.get(x, "") Type_Name = classmethod(Type_Name) has_name_ = 0 name_ = "" has_type_ = 0 type_ = 0 has_meaning_ = 0 meaning_ = 0 def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def name(self): return self.name_ def set_name(self, x): self.has_name_ = 1 self.name_ = x def clear_name(self): if self.has_name_: self.has_name_ = 0 self.name_ = "" def has_name(self): return self.has_name_ def type(self): return self.type_ def set_type(self, x): self.has_type_ = 1 self.type_ = x def clear_type(self): if self.has_type_: self.has_type_ = 0 self.type_ = 0 def has_type(self): return self.has_type_ def meaning(self): return self.meaning_ def set_meaning(self, x): self.has_meaning_ = 1 self.meaning_ = x def clear_meaning(self): if self.has_meaning_: self.has_meaning_ = 0 self.meaning_ = 0 def has_meaning(self): return self.has_meaning_ def MergeFrom(self, x): assert x is not self if (x.has_name()): self.set_name(x.name()) if (x.has_type()): self.set_type(x.type()) if (x.has_meaning()): self.set_meaning(x.meaning()) def Equals(self, x): if x is self: return 1 if self.has_name_ != x.has_name_: return 0 if self.has_name_ and self.name_ != x.name_: return 0 if self.has_type_ != x.has_type_: return 0 if self.has_type_ and self.type_ != x.type_: return 0 if self.has_meaning_ != x.has_meaning_: return 0 if self.has_meaning_ and self.meaning_ != x.meaning_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_name_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: name not set.') if (not self.has_type_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: type not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.name_)) n += self.lengthVarInt64(self.type_) if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_name_): n += 1 n += self.lengthString(len(self.name_)) if (self.has_type_): n += 1 n += self.lengthVarInt64(self.type_) if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_) return n def 
Clear(self): self.clear_name() self.clear_type() self.clear_meaning() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.name_) out.putVarInt32(16) out.putVarInt32(self.type_) if (self.has_meaning_): out.putVarInt32(24) out.putVarInt32(self.meaning_) def OutputPartial(self, out): if (self.has_name_): out.putVarInt32(10) out.putPrefixedString(self.name_) if (self.has_type_): out.putVarInt32(16) out.putVarInt32(self.type_) if (self.has_meaning_): out.putVarInt32(24) out.putVarInt32(self.meaning_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_name(d.getPrefixedString()) continue if tt == 16: self.set_type(d.getVarInt32()) continue if tt == 24: self.set_meaning(d.getVarInt32()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_)) if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_)) if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kname = 1 ktype = 2 kmeaning = 3 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "name", 2: "type", 3: "meaning", }, 3) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.NUMERIC, 3: ProtocolBuffer.Encoder.NUMERIC, }, 3, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SchemaEntry' class SubscribeRequest(ProtocolBuffer.ProtocolMessage): has_topic_ = 0 topic_ = "" has_sub_id_ = 0 sub_id_ = "" has_lease_duration_sec_ = 0 lease_duration_sec_ = 0.0 has_vanilla_query_ = 0 vanilla_query_ = "" def __init__(self, contents=None): self.schema_entry_ = [] if contents is not None: self.MergeFromString(contents) def topic(self): return self.topic_ def set_topic(self, x): self.has_topic_ = 1 self.topic_ = x def clear_topic(self): if self.has_topic_: self.has_topic_ = 0 self.topic_ = "" def has_topic(self): return self.has_topic_ def sub_id(self): return self.sub_id_ def set_sub_id(self, x): self.has_sub_id_ = 1 self.sub_id_ = x def clear_sub_id(self): if self.has_sub_id_: self.has_sub_id_ = 0 self.sub_id_ = "" def has_sub_id(self): return self.has_sub_id_ def lease_duration_sec(self): return self.lease_duration_sec_ def set_lease_duration_sec(self, x): self.has_lease_duration_sec_ = 1 self.lease_duration_sec_ = x def clear_lease_duration_sec(self): if self.has_lease_duration_sec_: self.has_lease_duration_sec_ = 0 self.lease_duration_sec_ = 0.0 def has_lease_duration_sec(self): return self.has_lease_duration_sec_ def vanilla_query(self): return self.vanilla_query_ def set_vanilla_query(self, x): self.has_vanilla_query_ = 1 self.vanilla_query_ = x def clear_vanilla_query(self): if self.has_vanilla_query_: self.has_vanilla_query_ = 0 self.vanilla_query_ = "" def has_vanilla_query(self): return self.has_vanilla_query_ def schema_entry_size(self): return len(self.schema_entry_) def schema_entry_list(self): return self.schema_entry_ def schema_entry(self, i): return self.schema_entry_[i] def mutable_schema_entry(self, i): return self.schema_entry_[i] def add_schema_entry(self): x = SchemaEntry() self.schema_entry_.append(x) return x def clear_schema_entry(self): self.schema_entry_ = [] def 
MergeFrom(self, x): assert x is not self if (x.has_topic()): self.set_topic(x.topic()) if (x.has_sub_id()): self.set_sub_id(x.sub_id()) if (x.has_lease_duration_sec()): self.set_lease_duration_sec(x.lease_duration_sec()) if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query()) for i in xrange(x.schema_entry_size()): self.add_schema_entry().CopyFrom(x.schema_entry(i)) def Equals(self, x): if x is self: return 1 if self.has_topic_ != x.has_topic_: return 0 if self.has_topic_ and self.topic_ != x.topic_: return 0 if self.has_sub_id_ != x.has_sub_id_: return 0 if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0 if self.has_lease_duration_sec_ != x.has_lease_duration_sec_: return 0 if self.has_lease_duration_sec_ and self.lease_duration_sec_ != x.lease_duration_sec_: return 0 if self.has_vanilla_query_ != x.has_vanilla_query_: return 0 if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0 if len(self.schema_entry_) != len(x.schema_entry_): return 0 for e1, e2 in zip(self.schema_entry_, x.schema_entry_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_topic_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: topic not set.') if (not self.has_sub_id_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: sub_id not set.') if (not self.has_lease_duration_sec_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: lease_duration_sec not set.') if (not self.has_vanilla_query_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: vanilla_query not set.') for p in self.schema_entry_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.topic_)) n += self.lengthString(len(self.sub_id_)) n += self.lengthString(len(self.vanilla_query_)) n += 1 * len(self.schema_entry_) for i in xrange(len(self.schema_entry_)): n += self.lengthString(self.schema_entry_[i].ByteSize()) return n + 12 def ByteSizePartial(self): n = 0 if (self.has_topic_): n += 1 n += self.lengthString(len(self.topic_)) if (self.has_sub_id_): n += 1 n += self.lengthString(len(self.sub_id_)) if (self.has_lease_duration_sec_): n += 9 if (self.has_vanilla_query_): n += 1 n += self.lengthString(len(self.vanilla_query_)) n += 1 * len(self.schema_entry_) for i in xrange(len(self.schema_entry_)): n += self.lengthString(self.schema_entry_[i].ByteSizePartial()) return n def Clear(self): self.clear_topic() self.clear_sub_id() self.clear_lease_duration_sec() self.clear_vanilla_query() self.clear_schema_entry() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.topic_) out.putVarInt32(18) out.putPrefixedString(self.sub_id_) out.putVarInt32(25) out.putDouble(self.lease_duration_sec_) out.putVarInt32(34) out.putPrefixedString(self.vanilla_query_) for i in xrange(len(self.schema_entry_)): out.putVarInt32(42) out.putVarInt32(self.schema_entry_[i].ByteSize()) self.schema_entry_[i].OutputUnchecked(out) def OutputPartial(self, out): if (self.has_topic_): out.putVarInt32(10) out.putPrefixedString(self.topic_) if (self.has_sub_id_): out.putVarInt32(18) out.putPrefixedString(self.sub_id_) if (self.has_lease_duration_sec_): out.putVarInt32(25) out.putDouble(self.lease_duration_sec_) if (self.has_vanilla_query_): out.putVarInt32(34) out.putPrefixedString(self.vanilla_query_) for i in xrange(len(self.schema_entry_)): out.putVarInt32(42) 
out.putVarInt32(self.schema_entry_[i].ByteSizePartial()) self.schema_entry_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_topic(d.getPrefixedString()) continue if tt == 18: self.set_sub_id(d.getPrefixedString()) continue if tt == 25: self.set_lease_duration_sec(d.getDouble()) continue if tt == 34: self.set_vanilla_query(d.getPrefixedString()) continue if tt == 42: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_schema_entry().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_)) if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_)) if self.has_lease_duration_sec_: res+=prefix+("lease_duration_sec: %s\n" % self.DebugFormat(self.lease_duration_sec_)) if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_)) cnt=0 for e in self.schema_entry_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("schema_entry%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic = 1 ksub_id = 2 klease_duration_sec = 3 kvanilla_query = 4 kschema_entry = 5 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic", 2: "sub_id", 3: "lease_duration_sec", 4: "vanilla_query", 5: "schema_entry", }, 5) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.DOUBLE, 4: ProtocolBuffer.Encoder.STRING, 5: ProtocolBuffer.Encoder.STRING, }, 5, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscribeRequest' class SubscribeResponse(ProtocolBuffer.ProtocolMessage): def __init__(self, contents=None): pass if contents is not None: self.MergeFromString(contents) def MergeFrom(self, x): assert x is not self def Equals(self, x): if x is self: return 1 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 return initialized def ByteSize(self): n = 0 return n def ByteSizePartial(self): n = 0 return n def Clear(self): pass def OutputUnchecked(self, out): pass def OutputPartial(self, out): pass def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", }, 0) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, }, 0, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscribeResponse' class UnsubscribeRequest(ProtocolBuffer.ProtocolMessage): has_topic_ = 0 topic_ = "" has_sub_id_ = 0 sub_id_ = "" def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def topic(self): return self.topic_ def set_topic(self, x): self.has_topic_ = 1 self.topic_ = x def clear_topic(self): if self.has_topic_: self.has_topic_ = 0 self.topic_ 
= "" def has_topic(self): return self.has_topic_ def sub_id(self): return self.sub_id_ def set_sub_id(self, x): self.has_sub_id_ = 1 self.sub_id_ = x def clear_sub_id(self): if self.has_sub_id_: self.has_sub_id_ = 0 self.sub_id_ = "" def has_sub_id(self): return self.has_sub_id_ def MergeFrom(self, x): assert x is not self if (x.has_topic()): self.set_topic(x.topic()) if (x.has_sub_id()): self.set_sub_id(x.sub_id()) def Equals(self, x): if x is self: return 1 if self.has_topic_ != x.has_topic_: return 0 if self.has_topic_ and self.topic_ != x.topic_: return 0 if self.has_sub_id_ != x.has_sub_id_: return 0 if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_topic_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: topic not set.') if (not self.has_sub_id_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: sub_id not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.topic_)) n += self.lengthString(len(self.sub_id_)) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_topic_): n += 1 n += self.lengthString(len(self.topic_)) if (self.has_sub_id_): n += 1 n += self.lengthString(len(self.sub_id_)) return n def Clear(self): self.clear_topic() self.clear_sub_id() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.topic_) out.putVarInt32(18) out.putPrefixedString(self.sub_id_) def OutputPartial(self, out): if (self.has_topic_): out.putVarInt32(10) out.putPrefixedString(self.topic_) if (self.has_sub_id_): out.putVarInt32(18) out.putPrefixedString(self.sub_id_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_topic(d.getPrefixedString()) continue if tt == 18: self.set_sub_id(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_)) if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic = 1 ksub_id = 2 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic", 2: "sub_id", }, 2) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.STRING, }, 2, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.UnsubscribeRequest' class UnsubscribeResponse(ProtocolBuffer.ProtocolMessage): def __init__(self, contents=None): pass if contents is not None: self.MergeFromString(contents) def MergeFrom(self, x): assert x is not self def Equals(self, x): if x is self: return 1 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 return initialized def ByteSize(self): n = 0 return n def ByteSizePartial(self): n = 0 return n def Clear(self): pass def OutputUnchecked(self, out): pass def OutputPartial(self, out): pass def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) 
_TEXT = _BuildTagLookupTable({ 0: "ErrorCode", }, 0) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, }, 0, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.UnsubscribeResponse' class SubscriptionRecord(ProtocolBuffer.ProtocolMessage): OK = 0 PENDING = 1 ERROR = 2 _State_NAMES = { 0: "OK", 1: "PENDING", 2: "ERROR", } def State_Name(cls, x): return cls._State_NAMES.get(x, "") State_Name = classmethod(State_Name) has_id_ = 0 id_ = "" has_vanilla_query_ = 0 vanilla_query_ = "" has_expiration_time_sec_ = 0 expiration_time_sec_ = 0.0 has_state_ = 0 state_ = 0 has_error_message_ = 0 error_message_ = "" def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def id(self): return self.id_ def set_id(self, x): self.has_id_ = 1 self.id_ = x def clear_id(self): if self.has_id_: self.has_id_ = 0 self.id_ = "" def has_id(self): return self.has_id_ def vanilla_query(self): return self.vanilla_query_ def set_vanilla_query(self, x): self.has_vanilla_query_ = 1 self.vanilla_query_ = x def clear_vanilla_query(self): if self.has_vanilla_query_: self.has_vanilla_query_ = 0 self.vanilla_query_ = "" def has_vanilla_query(self): return self.has_vanilla_query_ def expiration_time_sec(self): return self.expiration_time_sec_ def set_expiration_time_sec(self, x): self.has_expiration_time_sec_ = 1 self.expiration_time_sec_ = x def clear_expiration_time_sec(self): if self.has_expiration_time_sec_: self.has_expiration_time_sec_ = 0 self.expiration_time_sec_ = 0.0 def has_expiration_time_sec(self): return self.has_expiration_time_sec_ def state(self): return self.state_ def set_state(self, x): self.has_state_ = 1 self.state_ = x def clear_state(self): if self.has_state_: self.has_state_ = 0 self.state_ = 0 def has_state(self): return self.has_state_ def error_message(self): return self.error_message_ def set_error_message(self, x): self.has_error_message_ = 1 self.error_message_ = x def clear_error_message(self): if self.has_error_message_: self.has_error_message_ = 0 self.error_message_ = "" def has_error_message(self): return self.has_error_message_ def MergeFrom(self, x): assert x is not self if (x.has_id()): self.set_id(x.id()) if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query()) if (x.has_expiration_time_sec()): self.set_expiration_time_sec(x.expiration_time_sec()) if (x.has_state()): self.set_state(x.state()) if (x.has_error_message()): self.set_error_message(x.error_message()) def Equals(self, x): if x is self: return 1 if self.has_id_ != x.has_id_: return 0 if self.has_id_ and self.id_ != x.id_: return 0 if self.has_vanilla_query_ != x.has_vanilla_query_: return 0 if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0 if self.has_expiration_time_sec_ != x.has_expiration_time_sec_: return 0 if self.has_expiration_time_sec_ and self.expiration_time_sec_ != x.expiration_time_sec_: return 0 if self.has_state_ != x.has_state_: return 0 if self.has_state_ and self.state_ != x.state_: return 0 if self.has_error_message_ != x.has_error_message_: return 0 if self.has_error_message_ and self.error_message_ != x.error_message_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_id_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: id not set.') if (not self.has_vanilla_query_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: vanilla_query not 
set.') if (not self.has_expiration_time_sec_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: expiration_time_sec not set.') if (not self.has_state_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: state not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.id_)) n += self.lengthString(len(self.vanilla_query_)) n += self.lengthVarInt64(self.state_) if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_)) return n + 12 def ByteSizePartial(self): n = 0 if (self.has_id_): n += 1 n += self.lengthString(len(self.id_)) if (self.has_vanilla_query_): n += 1 n += self.lengthString(len(self.vanilla_query_)) if (self.has_expiration_time_sec_): n += 9 if (self.has_state_): n += 1 n += self.lengthVarInt64(self.state_) if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_)) return n def Clear(self): self.clear_id() self.clear_vanilla_query() self.clear_expiration_time_sec() self.clear_state() self.clear_error_message() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.id_) out.putVarInt32(18) out.putPrefixedString(self.vanilla_query_) out.putVarInt32(25) out.putDouble(self.expiration_time_sec_) out.putVarInt32(32) out.putVarInt32(self.state_) if (self.has_error_message_): out.putVarInt32(42) out.putPrefixedString(self.error_message_) def OutputPartial(self, out): if (self.has_id_): out.putVarInt32(10) out.putPrefixedString(self.id_) if (self.has_vanilla_query_): out.putVarInt32(18) out.putPrefixedString(self.vanilla_query_) if (self.has_expiration_time_sec_): out.putVarInt32(25) out.putDouble(self.expiration_time_sec_) if (self.has_state_): out.putVarInt32(32) out.putVarInt32(self.state_) if (self.has_error_message_): out.putVarInt32(42) out.putPrefixedString(self.error_message_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_id(d.getPrefixedString()) continue if tt == 18: self.set_vanilla_query(d.getPrefixedString()) continue if tt == 25: self.set_expiration_time_sec(d.getDouble()) continue if tt == 32: self.set_state(d.getVarInt32()) continue if tt == 42: self.set_error_message(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatString(self.id_)) if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_)) if self.has_expiration_time_sec_: res+=prefix+("expiration_time_sec: %s\n" % self.DebugFormat(self.expiration_time_sec_)) if self.has_state_: res+=prefix+("state: %s\n" % self.DebugFormatInt32(self.state_)) if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kid = 1 kvanilla_query = 2 kexpiration_time_sec = 3 kstate = 4 kerror_message = 5 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "id", 2: "vanilla_query", 3: "expiration_time_sec", 4: "state", 5: "error_message", }, 5) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.DOUBLE, 4: ProtocolBuffer.Encoder.NUMERIC, 5: ProtocolBuffer.Encoder.STRING, }, 5, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" 
_STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscriptionRecord' class ListSubscriptionsRequest(ProtocolBuffer.ProtocolMessage): has_topic_ = 0 topic_ = "" has_max_results_ = 0 max_results_ = 1000 has_expires_before_ = 0 expires_before_ = 0 has_subscription_id_start_ = 0 subscription_id_start_ = "" has_app_id_ = 0 app_id_ = "" def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def topic(self): return self.topic_ def set_topic(self, x): self.has_topic_ = 1 self.topic_ = x def clear_topic(self): if self.has_topic_: self.has_topic_ = 0 self.topic_ = "" def has_topic(self): return self.has_topic_ def max_results(self): return self.max_results_ def set_max_results(self, x): self.has_max_results_ = 1 self.max_results_ = x def clear_max_results(self): if self.has_max_results_: self.has_max_results_ = 0 self.max_results_ = 1000 def has_max_results(self): return self.has_max_results_ def expires_before(self): return self.expires_before_ def set_expires_before(self, x): self.has_expires_before_ = 1 self.expires_before_ = x def clear_expires_before(self): if self.has_expires_before_: self.has_expires_before_ = 0 self.expires_before_ = 0 def has_expires_before(self): return self.has_expires_before_ def subscription_id_start(self): return self.subscription_id_start_ def set_subscription_id_start(self, x): self.has_subscription_id_start_ = 1 self.subscription_id_start_ = x def clear_subscription_id_start(self): if self.has_subscription_id_start_: self.has_subscription_id_start_ = 0 self.subscription_id_start_ = "" def has_subscription_id_start(self): return self.has_subscription_id_start_ def app_id(self): return self.app_id_ def set_app_id(self, x): self.has_app_id_ = 1 self.app_id_ = x def clear_app_id(self): if self.has_app_id_: self.has_app_id_ = 0 self.app_id_ = "" def has_app_id(self): return self.has_app_id_ def MergeFrom(self, x): assert x is not self if (x.has_topic()): self.set_topic(x.topic()) if (x.has_max_results()): self.set_max_results(x.max_results()) if (x.has_expires_before()): self.set_expires_before(x.expires_before()) if (x.has_subscription_id_start()): self.set_subscription_id_start(x.subscription_id_start()) if (x.has_app_id()): self.set_app_id(x.app_id()) def Equals(self, x): if x is self: return 1 if self.has_topic_ != x.has_topic_: return 0 if self.has_topic_ and self.topic_ != x.topic_: return 0 if self.has_max_results_ != x.has_max_results_: return 0 if self.has_max_results_ and self.max_results_ != x.max_results_: return 0 if self.has_expires_before_ != x.has_expires_before_: return 0 if self.has_expires_before_ and self.expires_before_ != x.expires_before_: return 0 if self.has_subscription_id_start_ != x.has_subscription_id_start_: return 0 if self.has_subscription_id_start_ and self.subscription_id_start_ != x.subscription_id_start_: return 0 if self.has_app_id_ != x.has_app_id_: return 0 if self.has_app_id_ and self.app_id_ != x.app_id_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_topic_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: topic not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.topic_)) if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_) if (self.has_expires_before_): n += 1 + self.lengthVarInt64(self.expires_before_) if (self.has_subscription_id_start_): n += 1 + self.lengthString(len(self.subscription_id_start_)) if (self.has_app_id_): n 
+= 1 + self.lengthString(len(self.app_id_)) return n + 1 def ByteSizePartial(self): n = 0 if (self.has_topic_): n += 1 n += self.lengthString(len(self.topic_)) if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_) if (self.has_expires_before_): n += 1 + self.lengthVarInt64(self.expires_before_) if (self.has_subscription_id_start_): n += 1 + self.lengthString(len(self.subscription_id_start_)) if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_)) return n def Clear(self): self.clear_topic() self.clear_max_results() self.clear_expires_before() self.clear_subscription_id_start() self.clear_app_id() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.topic_) if (self.has_max_results_): out.putVarInt32(16) out.putVarInt64(self.max_results_) if (self.has_expires_before_): out.putVarInt32(24) out.putVarInt64(self.expires_before_) if (self.has_subscription_id_start_): out.putVarInt32(34) out.putPrefixedString(self.subscription_id_start_) if (self.has_app_id_): out.putVarInt32(42) out.putPrefixedString(self.app_id_) def OutputPartial(self, out): if (self.has_topic_): out.putVarInt32(10) out.putPrefixedString(self.topic_) if (self.has_max_results_): out.putVarInt32(16) out.putVarInt64(self.max_results_) if (self.has_expires_before_): out.putVarInt32(24) out.putVarInt64(self.expires_before_) if (self.has_subscription_id_start_): out.putVarInt32(34) out.putPrefixedString(self.subscription_id_start_) if (self.has_app_id_): out.putVarInt32(42) out.putPrefixedString(self.app_id_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_topic(d.getPrefixedString()) continue if tt == 16: self.set_max_results(d.getVarInt64()) continue if tt == 24: self.set_expires_before(d.getVarInt64()) continue if tt == 34: self.set_subscription_id_start(d.getPrefixedString()) continue if tt == 42: self.set_app_id(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_)) if self.has_max_results_: res+=prefix+("max_results: %s\n" % self.DebugFormatInt64(self.max_results_)) if self.has_expires_before_: res+=prefix+("expires_before: %s\n" % self.DebugFormatInt64(self.expires_before_)) if self.has_subscription_id_start_: res+=prefix+("subscription_id_start: %s\n" % self.DebugFormatString(self.subscription_id_start_)) if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic = 1 kmax_results = 2 kexpires_before = 3 ksubscription_id_start = 4 kapp_id = 5 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic", 2: "max_results", 3: "expires_before", 4: "subscription_id_start", 5: "app_id", }, 5) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.NUMERIC, 3: ProtocolBuffer.Encoder.NUMERIC, 4: ProtocolBuffer.Encoder.STRING, 5: ProtocolBuffer.Encoder.STRING, }, 5, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListSubscriptionsRequest' class ListSubscriptionsResponse(ProtocolBuffer.ProtocolMessage): def __init__(self, contents=None): self.subscription_ = [] if contents is not None: self.MergeFromString(contents) def 
subscription_size(self): return len(self.subscription_) def subscription_list(self): return self.subscription_ def subscription(self, i): return self.subscription_[i] def mutable_subscription(self, i): return self.subscription_[i] def add_subscription(self): x = SubscriptionRecord() self.subscription_.append(x) return x def clear_subscription(self): self.subscription_ = [] def MergeFrom(self, x): assert x is not self for i in xrange(x.subscription_size()): self.add_subscription().CopyFrom(x.subscription(i)) def Equals(self, x): if x is self: return 1 if len(self.subscription_) != len(x.subscription_): return 0 for e1, e2 in zip(self.subscription_, x.subscription_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 for p in self.subscription_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += 1 * len(self.subscription_) for i in xrange(len(self.subscription_)): n += self.lengthString(self.subscription_[i].ByteSize()) return n def ByteSizePartial(self): n = 0 n += 1 * len(self.subscription_) for i in xrange(len(self.subscription_)): n += self.lengthString(self.subscription_[i].ByteSizePartial()) return n def Clear(self): self.clear_subscription() def OutputUnchecked(self, out): for i in xrange(len(self.subscription_)): out.putVarInt32(10) out.putVarInt32(self.subscription_[i].ByteSize()) self.subscription_[i].OutputUnchecked(out) def OutputPartial(self, out): for i in xrange(len(self.subscription_)): out.putVarInt32(10) out.putVarInt32(self.subscription_[i].ByteSizePartial()) self.subscription_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_subscription().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" cnt=0 for e in self.subscription_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("subscription%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ksubscription = 1 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "subscription", }, 1) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, }, 1, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListSubscriptionsResponse' class ListTopicsRequest(ProtocolBuffer.ProtocolMessage): has_topic_start_ = 0 topic_start_ = "" has_max_results_ = 0 max_results_ = 1000 has_app_id_ = 0 app_id_ = "" def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def topic_start(self): return self.topic_start_ def set_topic_start(self, x): self.has_topic_start_ = 1 self.topic_start_ = x def clear_topic_start(self): if self.has_topic_start_: self.has_topic_start_ = 0 self.topic_start_ = "" def has_topic_start(self): return self.has_topic_start_ def max_results(self): return self.max_results_ def set_max_results(self, x): self.has_max_results_ = 1 self.max_results_ = x def clear_max_results(self): if self.has_max_results_: self.has_max_results_ = 0 self.max_results_ = 1000 def has_max_results(self): return self.has_max_results_ def app_id(self): return self.app_id_ def 
set_app_id(self, x): self.has_app_id_ = 1 self.app_id_ = x def clear_app_id(self): if self.has_app_id_: self.has_app_id_ = 0 self.app_id_ = "" def has_app_id(self): return self.has_app_id_ def MergeFrom(self, x): assert x is not self if (x.has_topic_start()): self.set_topic_start(x.topic_start()) if (x.has_max_results()): self.set_max_results(x.max_results()) if (x.has_app_id()): self.set_app_id(x.app_id()) def Equals(self, x): if x is self: return 1 if self.has_topic_start_ != x.has_topic_start_: return 0 if self.has_topic_start_ and self.topic_start_ != x.topic_start_: return 0 if self.has_max_results_ != x.has_max_results_: return 0 if self.has_max_results_ and self.max_results_ != x.max_results_: return 0 if self.has_app_id_ != x.has_app_id_: return 0 if self.has_app_id_ and self.app_id_ != x.app_id_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 return initialized def ByteSize(self): n = 0 if (self.has_topic_start_): n += 1 + self.lengthString(len(self.topic_start_)) if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_) if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_)) return n def ByteSizePartial(self): n = 0 if (self.has_topic_start_): n += 1 + self.lengthString(len(self.topic_start_)) if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_) if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_)) return n def Clear(self): self.clear_topic_start() self.clear_max_results() self.clear_app_id() def OutputUnchecked(self, out): if (self.has_topic_start_): out.putVarInt32(10) out.putPrefixedString(self.topic_start_) if (self.has_max_results_): out.putVarInt32(16) out.putVarInt64(self.max_results_) if (self.has_app_id_): out.putVarInt32(26) out.putPrefixedString(self.app_id_) def OutputPartial(self, out): if (self.has_topic_start_): out.putVarInt32(10) out.putPrefixedString(self.topic_start_) if (self.has_max_results_): out.putVarInt32(16) out.putVarInt64(self.max_results_) if (self.has_app_id_): out.putVarInt32(26) out.putPrefixedString(self.app_id_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_topic_start(d.getPrefixedString()) continue if tt == 16: self.set_max_results(d.getVarInt64()) continue if tt == 26: self.set_app_id(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_topic_start_: res+=prefix+("topic_start: %s\n" % self.DebugFormatString(self.topic_start_)) if self.has_max_results_: res+=prefix+("max_results: %s\n" % self.DebugFormatInt64(self.max_results_)) if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic_start = 1 kmax_results = 2 kapp_id = 3 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic_start", 2: "max_results", 3: "app_id", }, 3) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.NUMERIC, 3: ProtocolBuffer.Encoder.STRING, }, 3, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListTopicsRequest' class ListTopicsResponse(ProtocolBuffer.ProtocolMessage): def __init__(self, contents=None): self.topic_ = [] if contents is not None: self.MergeFromString(contents) def 
topic_size(self): return len(self.topic_) def topic_list(self): return self.topic_ def topic(self, i): return self.topic_[i] def set_topic(self, i, x): self.topic_[i] = x def add_topic(self, x): self.topic_.append(x) def clear_topic(self): self.topic_ = [] def MergeFrom(self, x): assert x is not self for i in xrange(x.topic_size()): self.add_topic(x.topic(i)) def Equals(self, x): if x is self: return 1 if len(self.topic_) != len(x.topic_): return 0 for e1, e2 in zip(self.topic_, x.topic_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 return initialized def ByteSize(self): n = 0 n += 1 * len(self.topic_) for i in xrange(len(self.topic_)): n += self.lengthString(len(self.topic_[i])) return n def ByteSizePartial(self): n = 0 n += 1 * len(self.topic_) for i in xrange(len(self.topic_)): n += self.lengthString(len(self.topic_[i])) return n def Clear(self): self.clear_topic() def OutputUnchecked(self, out): for i in xrange(len(self.topic_)): out.putVarInt32(10) out.putPrefixedString(self.topic_[i]) def OutputPartial(self, out): for i in xrange(len(self.topic_)): out.putVarInt32(10) out.putPrefixedString(self.topic_[i]) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.add_topic(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" cnt=0 for e in self.topic_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("topic%s: %s\n" % (elm, self.DebugFormatString(e))) cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic = 1 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic", }, 1) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, }, 1, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListTopicsResponse' class MatchRequest(ProtocolBuffer.ProtocolMessage): ENTITY = 1 MODEL = 2 _PythonDocumentClass_NAMES = { 1: "ENTITY", 2: "MODEL", } def PythonDocumentClass_Name(cls, x): return cls._PythonDocumentClass_NAMES.get(x, "") PythonDocumentClass_Name = classmethod(PythonDocumentClass_Name) has_topic_ = 0 topic_ = "" has_document_ = 0 has_result_batch_size_ = 0 result_batch_size_ = 0 has_result_task_queue_ = 0 result_task_queue_ = "" has_result_relative_url_ = 0 result_relative_url_ = "" has_result_key_ = 0 result_key_ = "" has_result_python_document_class_ = 0 result_python_document_class_ = 0 def __init__(self, contents=None): self.document_ = EntityProto() if contents is not None: self.MergeFromString(contents) def topic(self): return self.topic_ def set_topic(self, x): self.has_topic_ = 1 self.topic_ = x def clear_topic(self): if self.has_topic_: self.has_topic_ = 0 self.topic_ = "" def has_topic(self): return self.has_topic_ def document(self): return self.document_ def mutable_document(self): self.has_document_ = 1; return self.document_ def clear_document(self):self.has_document_ = 0; self.document_.Clear() def has_document(self): return self.has_document_ def result_batch_size(self): return self.result_batch_size_ def set_result_batch_size(self, x): self.has_result_batch_size_ = 1 self.result_batch_size_ = x def clear_result_batch_size(self): if self.has_result_batch_size_: self.has_result_batch_size_ = 0 self.result_batch_size_ = 0 def has_result_batch_size(self): return 
self.has_result_batch_size_ def result_task_queue(self): return self.result_task_queue_ def set_result_task_queue(self, x): self.has_result_task_queue_ = 1 self.result_task_queue_ = x def clear_result_task_queue(self): if self.has_result_task_queue_: self.has_result_task_queue_ = 0 self.result_task_queue_ = "" def has_result_task_queue(self): return self.has_result_task_queue_ def result_relative_url(self): return self.result_relative_url_ def set_result_relative_url(self, x): self.has_result_relative_url_ = 1 self.result_relative_url_ = x def clear_result_relative_url(self): if self.has_result_relative_url_: self.has_result_relative_url_ = 0 self.result_relative_url_ = "" def has_result_relative_url(self): return self.has_result_relative_url_ def result_key(self): return self.result_key_ def set_result_key(self, x): self.has_result_key_ = 1 self.result_key_ = x def clear_result_key(self): if self.has_result_key_: self.has_result_key_ = 0 self.result_key_ = "" def has_result_key(self): return self.has_result_key_ def result_python_document_class(self): return self.result_python_document_class_ def set_result_python_document_class(self, x): self.has_result_python_document_class_ = 1 self.result_python_document_class_ = x def clear_result_python_document_class(self): if self.has_result_python_document_class_: self.has_result_python_document_class_ = 0 self.result_python_document_class_ = 0 def has_result_python_document_class(self): return self.has_result_python_document_class_ def MergeFrom(self, x): assert x is not self if (x.has_topic()): self.set_topic(x.topic()) if (x.has_document()): self.mutable_document().MergeFrom(x.document()) if (x.has_result_batch_size()): self.set_result_batch_size(x.result_batch_size()) if (x.has_result_task_queue()): self.set_result_task_queue(x.result_task_queue()) if (x.has_result_relative_url()): self.set_result_relative_url(x.result_relative_url()) if (x.has_result_key()): self.set_result_key(x.result_key()) if (x.has_result_python_document_class()): self.set_result_python_document_class(x.result_python_document_class()) def Equals(self, x): if x is self: return 1 if self.has_topic_ != x.has_topic_: return 0 if self.has_topic_ and self.topic_ != x.topic_: return 0 if self.has_document_ != x.has_document_: return 0 if self.has_document_ and self.document_ != x.document_: return 0 if self.has_result_batch_size_ != x.has_result_batch_size_: return 0 if self.has_result_batch_size_ and self.result_batch_size_ != x.result_batch_size_: return 0 if self.has_result_task_queue_ != x.has_result_task_queue_: return 0 if self.has_result_task_queue_ and self.result_task_queue_ != x.result_task_queue_: return 0 if self.has_result_relative_url_ != x.has_result_relative_url_: return 0 if self.has_result_relative_url_ and self.result_relative_url_ != x.result_relative_url_: return 0 if self.has_result_key_ != x.has_result_key_: return 0 if self.has_result_key_ and self.result_key_ != x.result_key_: return 0 if self.has_result_python_document_class_ != x.has_result_python_document_class_: return 0 if self.has_result_python_document_class_ and self.result_python_document_class_ != x.result_python_document_class_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_topic_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: topic not set.') if (not self.has_document_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: document not set.') elif not 
self.document_.IsInitialized(debug_strs): initialized = 0 if (not self.has_result_batch_size_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: result_batch_size not set.') if (not self.has_result_task_queue_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: result_task_queue not set.') if (not self.has_result_relative_url_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: result_relative_url not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.topic_)) n += self.lengthString(self.document_.ByteSize()) n += self.lengthVarInt64(self.result_batch_size_) n += self.lengthString(len(self.result_task_queue_)) n += self.lengthString(len(self.result_relative_url_)) if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_)) if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_) return n + 5 def ByteSizePartial(self): n = 0 if (self.has_topic_): n += 1 n += self.lengthString(len(self.topic_)) if (self.has_document_): n += 1 n += self.lengthString(self.document_.ByteSizePartial()) if (self.has_result_batch_size_): n += 1 n += self.lengthVarInt64(self.result_batch_size_) if (self.has_result_task_queue_): n += 1 n += self.lengthString(len(self.result_task_queue_)) if (self.has_result_relative_url_): n += 1 n += self.lengthString(len(self.result_relative_url_)) if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_)) if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_) return n def Clear(self): self.clear_topic() self.clear_document() self.clear_result_batch_size() self.clear_result_task_queue() self.clear_result_relative_url() self.clear_result_key() self.clear_result_python_document_class() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.topic_) out.putVarInt32(18) out.putVarInt32(self.document_.ByteSize()) self.document_.OutputUnchecked(out) out.putVarInt32(24) out.putVarInt32(self.result_batch_size_) out.putVarInt32(34) out.putPrefixedString(self.result_task_queue_) out.putVarInt32(42) out.putPrefixedString(self.result_relative_url_) if (self.has_result_key_): out.putVarInt32(50) out.putPrefixedString(self.result_key_) if (self.has_result_python_document_class_): out.putVarInt32(56) out.putVarInt32(self.result_python_document_class_) def OutputPartial(self, out): if (self.has_topic_): out.putVarInt32(10) out.putPrefixedString(self.topic_) if (self.has_document_): out.putVarInt32(18) out.putVarInt32(self.document_.ByteSizePartial()) self.document_.OutputPartial(out) if (self.has_result_batch_size_): out.putVarInt32(24) out.putVarInt32(self.result_batch_size_) if (self.has_result_task_queue_): out.putVarInt32(34) out.putPrefixedString(self.result_task_queue_) if (self.has_result_relative_url_): out.putVarInt32(42) out.putPrefixedString(self.result_relative_url_) if (self.has_result_key_): out.putVarInt32(50) out.putPrefixedString(self.result_key_) if (self.has_result_python_document_class_): out.putVarInt32(56) out.putVarInt32(self.result_python_document_class_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_topic(d.getPrefixedString()) continue if tt == 18: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.mutable_document().TryMerge(tmp) continue if tt == 24: 
self.set_result_batch_size(d.getVarInt32()) continue if tt == 34: self.set_result_task_queue(d.getPrefixedString()) continue if tt == 42: self.set_result_relative_url(d.getPrefixedString()) continue if tt == 50: self.set_result_key(d.getPrefixedString()) continue if tt == 56: self.set_result_python_document_class(d.getVarInt32()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_)) if self.has_document_: res+=prefix+"document <\n" res+=self.document_.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" if self.has_result_batch_size_: res+=prefix+("result_batch_size: %s\n" % self.DebugFormatInt32(self.result_batch_size_)) if self.has_result_task_queue_: res+=prefix+("result_task_queue: %s\n" % self.DebugFormatString(self.result_task_queue_)) if self.has_result_relative_url_: res+=prefix+("result_relative_url: %s\n" % self.DebugFormatString(self.result_relative_url_)) if self.has_result_key_: res+=prefix+("result_key: %s\n" % self.DebugFormatString(self.result_key_)) if self.has_result_python_document_class_: res+=prefix+("result_python_document_class: %s\n" % self.DebugFormatInt32(self.result_python_document_class_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) ktopic = 1 kdocument = 2 kresult_batch_size = 3 kresult_task_queue = 4 kresult_relative_url = 5 kresult_key = 6 kresult_python_document_class = 7 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "topic", 2: "document", 3: "result_batch_size", 4: "result_task_queue", 5: "result_relative_url", 6: "result_key", 7: "result_python_document_class", }, 7) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.NUMERIC, 4: ProtocolBuffer.Encoder.STRING, 5: ProtocolBuffer.Encoder.STRING, 6: ProtocolBuffer.Encoder.STRING, 7: ProtocolBuffer.Encoder.NUMERIC, }, 7, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.MatchRequest' class MatchResponse(ProtocolBuffer.ProtocolMessage): def __init__(self, contents=None): pass if contents is not None: self.MergeFromString(contents) def MergeFrom(self, x): assert x is not self def Equals(self, x): if x is self: return 1 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 return initialized def ByteSize(self): n = 0 return n def ByteSizePartial(self): n = 0 return n def Clear(self): pass def OutputUnchecked(self, out): pass def OutputPartial(self, out): pass def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", }, 0) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, }, 0, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.MatchResponse' if _extension_runtime: pass __all__ = 
['SchemaEntry','SubscribeRequest','SubscribeResponse','UnsubscribeRequest','UnsubscribeResponse','SubscriptionRecord','ListSubscriptionsRequest','ListSubscriptionsResponse','ListTopicsRequest','ListTopicsResponse','MatchRequest','MatchResponse']
adviti/melange
thirdparty/google_appengine/google/appengine/api/prospective_search/prospective_search_pb.py
Python
apache-2.0
60,260
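# Illustrative sketch (not part of the record above): a typical round-trip use
# of the generated ListSubscriptionsRequest class. Only setters/accessors that
# appear in the generated code are used; Encode() is assumed to come from the
# ProtocolBuffer.ProtocolMessage base class, and the topic value is made up.
from google.appengine.api.prospective_search import prospective_search_pb

req = prospective_search_pb.ListSubscriptionsRequest()
req.set_topic('example-topic')      # required field
req.set_max_results(100)            # optional, defaults to 1000

errors = []
assert req.IsInitialized(errors), errors

data = req.Encode()                 # serialize (assumed base-class method)
copy = prospective_search_pb.ListSubscriptionsRequest(data)
assert copy.topic() == 'example-topic' and copy.max_results() == 100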
#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test alarm notifier."""

from ceilometer.alarm import notifier


class TestAlarmNotifier(notifier.AlarmNotifier):
    """Test alarm notifier."""

    def __init__(self):
        self.notifications = []

    def notify(self, action, alarm_id, alarm_name, severity,
               previous, current, reason, reason_data):
        self.notifications.append((action,
                                   alarm_id,
                                   alarm_name,
                                   severity,
                                   previous,
                                   current,
                                   reason,
                                   reason_data))
Juniper/ceilometer
ceilometer/alarm/notifier/test.py
Python
apache-2.0
1,257
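# Illustrative sketch: TestAlarmNotifier simply records whatever it is asked
# to deliver, so tests can assert on alarm notifications afterwards. The
# import path follows the record's file path; the alarm values are made up.
from ceilometer.alarm.notifier import test as test_notifier

n = test_notifier.TestAlarmNotifier()
n.notify(action='test://', alarm_id='alarm-1', alarm_name='cpu_high',
         severity='low', previous='ok', current='alarm',
         reason='threshold crossed', reason_data={'value': 99})
assert len(n.notifications) == 1
assert n.notifications[0][1] == 'alarm-1'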
# -*- coding: utf-8 -*-
# (c) 2015 Incaser Informatica S.L. - Sergio Teruel
# (c) 2015 Incaser Informatica S.L. - Carlos Dauden
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import main
incaser/incaser-odoo-addons
website_maintenance/controllers/__init__.py
Python
agpl-3.0
213
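# Illustrative, hypothetical sketch: the controllers package above only does
# "from . import main". A minimal Odoo 8-style website controller of the kind
# main.py typically holds could look like this; the route and the QWeb
# template name are invented for illustration, not taken from the addon.
from openerp import http
from openerp.http import request


class MaintenanceController(http.Controller):

    @http.route('/website_maintenance/status', type='http', auth='public',
                website=True)
    def status(self, **kwargs):
        # 'website_maintenance.status' is a made-up template id.
        return request.render('website_maintenance.status', {})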
# coding: utf8 { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', '%s rows deleted': '已刪除 %s 筆', '%s rows updated': '已更新 %s 筆', '(something like "it-it")': '(格式類似 "zh-tw")', 'A new version of web2py is available': '新版的 web2py 已發行', 'A new version of web2py is available: %s': '新版的 web2py 已發行: %s', 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).', 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例', 'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!', 'About': '關於', 'About application': '關於本應用程式', 'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉', 'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉', 'Administrator Password:': '管理員密碼:', 'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?', 'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"', 'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"', 'Asíncrona': 'Asíncrona', 'Authentication': '驗證', 'Available databases and tables': '可提供的資料庫和資料表', 'Ayuda': 'Ayuda', 'Cannot be empty': '不可空白', 'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.', 'Change Password': '變更密碼', 'Check to delete': '打勾代表刪除', 'Check to delete:': '點選以示刪除:', 'Client IP': '客戶端網址(IP)', 'Comprobantes': 'Comprobantes', 'Configuración': 'Configuración', 'Configurar': 'Configurar', 'Consultas': 'Consultas', 'Controller': '控件', 'Controllers': '控件', 'Copyright': '版權所有', 'Cotización': 'Cotización', 'Create new application': '創建應用程式', 'Current request': '目前網路資料要求(request)', 'Current response': '目前網路資料回應(response)', 'Current session': '目前網路連線資訊(session)', 'DB Model': '資料庫模組', 'DESIGN': '設計', 'Database': '資料庫', 'Date and Time': '日期和時間', 'Delete': '刪除', 'Delete:': '刪除:', 'Deploy on Google App Engine': '配置到 Google App Engine', 'Description': '描述', 'Design for': '設計為了', 'Detalles': 'Detalles', 'E-mail': '電子郵件', 'EDIT': '編輯', 'Edit': '編輯', 'Edit Profile': '編輯設定檔', 'Edit This App': '編輯本應用程式', 'Edit application': '編輯應用程式', 'Edit current record': '編輯當前紀錄', 'Editing file': '編輯檔案', 'Editing file "%s"': '編輯檔案"%s"', 'Emisión': 'Emisión', 'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄', 'Estado (dummy)': 'Estado (dummy)', 'FacturaLibre': 'FacturaLibre', 'FacturaLibre. Aplicación en desarrollo': 'FacturaLibre. Aplicación en desarrollo', 'FacturaLibre. Aplicación web para factura electrónica': 'FacturaLibre. Aplicación web para factura electrónica', 'FacturaLibre: interfase alternativa': 'FacturaLibre: interfase alternativa', 'FacturaLibre: interfaz de usuario alternativa': 'FacturaLibre: interfaz de usuario alternativa', 'First name': '名', 'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].', 'Group ID': '群組編號', 'Hello World': '嗨! 
世界', 'Import/Export': '匯入/匯出', 'Index': '索引', 'Información General': 'Información General', 'Información Técnica': 'Información Técnica', 'Inicio': 'Inicio', 'Installed applications': '已安裝應用程式', 'Internal State': '內部狀態', 'Invalid Query': '不合法的查詢', 'Invalid action': '不合法的動作(action)', 'Invalid email': '不合法的電子郵件', 'Language files (static strings) updated': '語言檔已更新', 'Languages': '各國語言', 'Last name': '姓', 'Last saved on:': '最後儲存時間:', 'Layout': '網頁配置', 'License for': '軟體版權為', 'Listar comprobantes.': 'Listar comprobantes.', 'Listar detalles': 'Listar detalles', 'Login': '登入', 'Login to the Administrative Interface': '登入到管理員介面', 'Logout': '登出', 'Lost Password': '密碼遺忘', 'Main Menu': '主選單', 'Menu Model': '選單模組(menu)', 'Models': '資料模組', 'Modules': '程式模組', 'NO': '否', 'Name': '名字', 'New Record': '新紀錄', 'No databases in this application': '這應用程式不含資料庫', 'Origin': '原文', 'Original/Translation': '原文/翻譯', 'Password': '密碼', "Password fields don't match": '密碼欄不匹配', 'Peeking at file': '選擇檔案', 'Powered by': '基於以下技術構建:', 'Query:': '查詢:', 'Record ID': '紀錄編號', 'Register': '註冊', 'Registration key': '註冊金鑰', 'Remember me (for 30 days)': '記住我(30 天)', 'Reset Password key': '重設密碼', 'Resolve Conflict file': '解決衝突檔案', 'Role': '角色', 'Rows in table': '在資料表裏的資料', 'Rows selected': '筆資料被選擇', 'Saved file hash:': '檔案雜湊值已紀錄:', 'Secuencial': 'Secuencial', 'Servicios Web': 'Servicios Web', 'Static files': '靜態檔案', 'Stylesheet': '網頁風格檔', 'Submit': '傳送', 'Sure you want to delete this object?': '確定要刪除此物件?', 'Table name': '資料表名稱', 'Testing application': '測試中的應用程式', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.', 'There are no controllers': '沒有控件(controllers)', 'There are no models': '沒有資料庫模組(models)', 'There are no modules': '沒有程式模組(modules)', 'There are no static files': '沒有靜態檔案', 'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言', 'There are no views': '沒有視圖', 'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)', 'Ticket': '問題單', 'Timestamp': '時間標記', 'Unable to check for upgrades': '無法做升級檢查', 'Unable to download': '無法下載', 'Unable to download app': '無法下載應用程式', 'Update:': '更新:', 'Upload existing application': '更新存在的應用程式', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 
代表擇一的條件, ~(...)則代表反向條件.', 'User %(id)s Logged-in': '使用者 %(id)s 已登入', 'User %(id)s Registered': '使用者 %(id)s 已註冊', 'User ID': '使用者編號', 'Verify Password': '驗證密碼', 'View': '視圖', 'Views': '視圖', 'WSBFE': 'WSBFE', 'WSFEX': 'WSFEX', 'WSFEv0': 'WSFEv0', 'WSFEv1': 'WSFEv1', 'WSMTXCA': 'WSMTXCA', 'Welcome %s': '歡迎 %s', 'Welcome to web2py': '歡迎使用 web2py', 'YES': '是', 'about': '關於', 'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉', 'cache': '快取記憶體', 'change password': '變更密碼', 'click here for online examples': '點此處進入線上範例', 'click here for the administrative interface': '點此處進入管理介面', 'customize me!': '請調整我!', 'data uploaded': '資料已上傳', 'database': '資料庫', 'database %s select': '已選擇 %s 資料庫', 'db': 'db', 'design': '設計', 'done!': '完成!', 'edit profile': '編輯設定檔', 'export as csv file': '以逗號分隔檔(csv)格式匯出', 'insert new': '插入新資料', 'insert new %s': '插入新資料 %s', 'invalid request': '不合法的網路要求(request)', 'login': '登入', 'logout': '登出', 'new record inserted': '已插入新紀錄', 'next 100 rows': '往後 100 筆', 'or import from csv file': '或是從逗號分隔檔(CSV)匯入', 'previous 100 rows': '往前 100 筆', 'record': '紀錄', 'record does not exist': '紀錄不存在', 'record id': '紀錄編號', 'register': '註冊', 'selected': '已選擇', 'state': '狀態', 'table': '資料表', 'unable to parse csv file': '無法解析逗號分隔檔(csv)', 'Últ.ID': 'Últ.ID', 'Últ.Nro.Cbte.': 'Últ.Nro.Cbte.', }
tectronics/pyafipws.web2py-app
languages/zh-tw.py
Python
agpl-3.0
8,854
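# Illustrative sketch: web2py resolves strings through the T() translator that
# the framework injects into controllers and views, using language files like
# languages/zh-tw.py above; keys missing from the file (e.g. the Spanish
# labels kept verbatim) simply fall back to the original text.
def index():
    response.flash = T('Welcome to web2py')   # -> '歡迎使用 web2py' under zh-tw
    return dict(message=T('Hello World'))     # -> '嗨! 世界'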
"""SCons.Tool.g++ Tool-specific initialization for g++. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/g++.py 3842 2008/12/20 22:59:52 scons" import os.path import re import subprocess import SCons.Tool import SCons.Util cplusplus = __import__('c++', globals(), locals(), []) compilers = ['g++'] def generate(env): """Add Builders and construction variables for g++ to an Environment.""" static_obj, shared_obj = SCons.Tool.createObjBuilders(env) cplusplus.generate(env) env['CXX'] = env.Detect(compilers) # platform specific settings if env['PLATFORM'] == 'aix': env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc') env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1 env['SHOBJSUFFIX'] = '$OBJSUFFIX' elif env['PLATFORM'] == 'hpux': env['SHOBJSUFFIX'] = '.pic.o' elif env['PLATFORM'] == 'sunos': env['SHOBJSUFFIX'] = '.pic.o' # determine compiler version if env['CXX']: #pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'], pipe = SCons.Action._subproc(env, [env['CXX'], '--version'], stdin = 'devnull', stderr = 'devnull', stdout = subprocess.PIPE) if pipe.wait() != 0: return # -dumpversion was added in GCC 3.0. As long as we're supporting # GCC versions older than that, we should use --version and a # regular expression. #line = pipe.stdout.read().strip() #if line: # env['CXXVERSION'] = line line = pipe.stdout.readline() match = re.search(r'[0-9]+(\.[0-9]+)+', line) if match: env['CXXVERSION'] = match.group(0) def exists(env): return env.Detect(compilers)
makinacorpus/mapnik2
scons/scons-local-1.2.0/SCons/Tool/g++.py
Python
lgpl-2.1
3,111
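# Illustrative SConstruct sketch: the g++ tool above is requested by name,
# after which generate() sets CXX and, when version detection succeeds,
# CXXVERSION. Environment is provided by SCons inside an SConstruct; the
# source file name is made up.
env = Environment(tools=['default', 'g++'])
print env['CXX'], env.get('CXXVERSION', 'unknown')
env.Program(target='hello', source=['hello.cpp'])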
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.conf import settings

DASHBOARD = 'developer'

ADD_ANGULAR_MODULES = [
    'horizon.dashboard.developer'
]

ADD_INSTALLED_APPS = [
    'openstack_dashboard.contrib.developer'
]

ADD_SCSS_FILES = [
    'dashboard/developer/developer.scss',
]

AUTO_DISCOVER_STATIC_FILES = True

DISABLED = True

if getattr(settings, 'DEBUG', False):
    DISABLED = False
yangleo/cloud-github
openstack_dashboard/enabled/_9001_developer.py
Python
apache-2.0
948
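# Illustrative note on the record above: the two-step DISABLED toggle is
# equivalent to the single expression below, i.e. the developer dashboard is
# registered only when Django's DEBUG setting is on. A deployment wanting the
# panel in production could instead ship an overriding local "enabled" file
# that sets DISABLED = False (override mechanism assumed, not shown here).
from django.conf import settings

DISABLED = not getattr(settings, 'DEBUG', False)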
# -*- coding: utf-8 -*- # Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import time import unittest from django.core.cache import cache from django.utils.cache import patch_vary_headers from django.http import HttpResponse # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Cache(unittest.TestCase): def test_simple(self): # simple set/get cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_add(self): # test add (only add if key isn't already in cache) cache.add("addkey1", "value") cache.add("addkey1", "newvalue") self.assertEqual(cache.get("addkey1"), "value") def test_non_existent(self): # get with non-existent keys self.assertEqual(cache.get("does_not_exist"), None) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # get_many cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'}) def test_delete(self): # delete cache.set("key1", "spam") cache.set("key2", "eggs") self.assertEqual(cache.get("key1"), "spam") cache.delete("key1") self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), "eggs") def test_has_key(self): # has_key cache.set("hello1", "goodbye1") self.assertEqual(cache.has_key("hello1"), True) self.assertEqual(cache.has_key("goodbye1"), False) def test_in(self): cache.set("hello2", "goodbye2") self.assertEqual("hello2" in cache, True) self.assertEqual("goodbye2" in cache, False) def test_data_types(self): stuff = { 'string' : 'this is a string', 'int' : 42, 'list' : [1, 2, 3, 4], 'tuple' : (1, 2, 3, 4), 'dict' : {'A': 1, 'B' : 2}, 'function' : f, 'class' : C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_expiration(self): cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertEqual(cache.get("expire1"), None) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), "newvalue") self.assertEqual(cache.has_key("expire3"), False) def test_unicode(self): stuff = { u'ascii': u'ascii_value', u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1', u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2', u'ascii': {u'x' : 1 } } for (key, value) in stuff.items(): cache.set(key, value) self.assertEqual(cache.get(key), value) import os import md5 import shutil import tempfile from django.core.cache.backends.filebased import CacheClass as FileCache class FileBasedCacheTests(unittest.TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): self.dirname = tempfile.mktemp() os.mkdir(self.dirname) self.cache = FileCache(self.dirname, {}) def tearDown(self): shutil.rmtree(self.dirname) def test_hashing(self): """Test that keys are hashed into subdirectories correctly""" self.cache.set("foo", "bar") keyhash = md5.new("foo").hexdigest() keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:]) self.assert_(os.path.exists(keypath)) def test_subdirectory_removal(self): """ Make sure that the created subdirectories are correctly removed when empty. 
""" self.cache.set("foo", "bar") keyhash = md5.new("foo").hexdigest() keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:]) self.assert_(os.path.exists(keypath)) self.cache.delete("foo") self.assert_(not os.path.exists(keypath)) self.assert_(not os.path.exists(os.path.dirname(keypath))) self.assert_(not os.path.exists(os.path.dirname(os.path.dirname(keypath)))) class CacheUtils(unittest.TestCase): """TestCase for django.utils.cache functions.""" def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: response = HttpResponse() if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) if __name__ == '__main__': unittest.main()
paulsmith/geodjango
tests/regressiontests/cache/tests.py
Python
bsd-3-clause
5,938
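# Illustrative sketch: the FileBasedCacheTests above construct the backend
# directly; in a project of this Django vintage the same backend would be
# selected with the old-style CACHE_BACKEND setting and then used through the
# module-level cache object. The path and keys are made up.

# settings.py
CACHE_BACKEND = 'file:///var/tmp/django_cache'

# elsewhere
from django.core.cache import cache

cache.set('greeting', 'hello', 30)            # 30-second timeout
assert cache.get('greeting') == 'hello'
assert cache.get('missing', 'default') == 'default'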
""" Provides a set of pluggable permission policies. """ from __future__ import unicode_literals from django.http import Http404 from rest_framework import exceptions from rest_framework.compat import is_authenticated SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS') class BasePermission(object): """ A base class from which all permission classes should inherit. """ def has_permission(self, request, view): """ Return `True` if permission is granted, `False` otherwise. """ return True def has_object_permission(self, request, view, obj): """ Return `True` if permission is granted, `False` otherwise. """ return True class AllowAny(BasePermission): """ Allow any access. This isn't strictly required, since you could use an empty permission_classes list, but it's useful because it makes the intention more explicit. """ def has_permission(self, request, view): return True class IsAuthenticated(BasePermission): """ Allows access only to authenticated users. """ def has_permission(self, request, view): return request.user and is_authenticated(request.user) class IsAdminUser(BasePermission): """ Allows access only to admin users. """ def has_permission(self, request, view): return request.user and request.user.is_staff class IsAuthenticatedOrReadOnly(BasePermission): """ The request is authenticated as a user, or is a read-only request. """ def has_permission(self, request, view): return ( request.method in SAFE_METHODS or request.user and is_authenticated(request.user) ) class DjangoModelPermissions(BasePermission): """ The request is authenticated using `django.contrib.auth` permissions. See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions It ensures that the user is authenticated, and has the appropriate `add`/`change`/`delete` permissions on the model. This permission can only be applied against view classes that provide a `.queryset` attribute. """ # Map methods into required permission codes. # Override this if you need to also provide 'view' permissions, # or if you want to provide custom permission codes. perms_map = { 'GET': [], 'OPTIONS': [], 'HEAD': [], 'POST': ['%(app_label)s.add_%(model_name)s'], 'PUT': ['%(app_label)s.change_%(model_name)s'], 'PATCH': ['%(app_label)s.change_%(model_name)s'], 'DELETE': ['%(app_label)s.delete_%(model_name)s'], } authenticated_users_only = True def get_required_permissions(self, method, model_cls): """ Given a model and an HTTP method, return the list of permission codes that the user is required to have. """ kwargs = { 'app_label': model_cls._meta.app_label, 'model_name': model_cls._meta.model_name } if method not in self.perms_map: raise exceptions.MethodNotAllowed(method) return [perm % kwargs for perm in self.perms_map[method]] def has_permission(self, request, view): # Workaround to ensure DjangoModelPermissions are not applied # to the root view when using DefaultRouter. if getattr(view, '_ignore_model_permissions', False): return True if hasattr(view, 'get_queryset'): queryset = view.get_queryset() else: queryset = getattr(view, 'queryset', None) assert queryset is not None, ( 'Cannot apply DjangoModelPermissions on a view that ' 'does not set `.queryset` or have a `.get_queryset()` method.' 
) perms = self.get_required_permissions(request.method, queryset.model) return ( request.user and (is_authenticated(request.user) or not self.authenticated_users_only) and request.user.has_perms(perms) ) class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions): """ Similar to DjangoModelPermissions, except that anonymous users are allowed read-only access. """ authenticated_users_only = False class DjangoObjectPermissions(DjangoModelPermissions): """ The request is authenticated using Django's object-level permissions. It requires an object-permissions-enabled backend, such as Django Guardian. It ensures that the user is authenticated, and has the appropriate `add`/`change`/`delete` permissions on the object using .has_perms. This permission can only be applied against view classes that provide a `.queryset` attribute. """ perms_map = { 'GET': [], 'OPTIONS': [], 'HEAD': [], 'POST': ['%(app_label)s.add_%(model_name)s'], 'PUT': ['%(app_label)s.change_%(model_name)s'], 'PATCH': ['%(app_label)s.change_%(model_name)s'], 'DELETE': ['%(app_label)s.delete_%(model_name)s'], } def get_required_object_permissions(self, method, model_cls): kwargs = { 'app_label': model_cls._meta.app_label, 'model_name': model_cls._meta.model_name } if method not in self.perms_map: raise exceptions.MethodNotAllowed(method) return [perm % kwargs for perm in self.perms_map[method]] def has_object_permission(self, request, view, obj): if hasattr(view, 'get_queryset'): queryset = view.get_queryset() else: queryset = getattr(view, 'queryset', None) assert queryset is not None, ( 'Cannot apply DjangoObjectPermissions on a view that ' 'does not set `.queryset` or have a `.get_queryset()` method.' ) model_cls = queryset.model user = request.user perms = self.get_required_object_permissions(request.method, model_cls) if not user.has_perms(perms, obj): # If the user does not have permissions we need to determine if # they have read permissions to see 403, or not, and simply see # a 404 response. if request.method in SAFE_METHODS: # Read permissions already checked and failed, no need # to make another lookup. raise Http404 read_perms = self.get_required_object_permissions('GET', model_cls) if not user.has_perms(read_perms, obj): raise Http404 # Has read permissions. return False return True
hchen1202/django-react
virtualenv/lib/python3.6/site-packages/rest_framework/permissions.py
Python
mit
6,655
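# Illustrative sketch: attaching the permission classes above to a DRF view,
# plus a custom object-level permission built on BasePermission. The view and
# the assumption that protected objects expose an "owner" attribute are made
# up for the example.
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView


class IsOwnerOrReadOnly(permissions.BasePermission):
    """Allow safe methods to anyone; writes only to the object's owner."""

    def has_object_permission(self, request, view, obj):
        if request.method in permissions.SAFE_METHODS:
            return True
        return getattr(obj, 'owner', None) == request.user


class ExampleView(APIView):
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly)

    def get(self, request, format=None):
        return Response({'detail': 'readable anonymously'})

    def post(self, request, format=None):
        return Response({'detail': 'requires an authenticated user'})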
import tarfile import time import os import json class BackupTool(object): """Simple backup utility.""" def __init__(self): pass @staticmethod def backup(openbazaar_installation_path, backup_folder_path, on_success_callback=None, on_error_callback=None): """ Creates an 'openbazaar-YYYY-MM-DD-hh-mm-ss.tar.gz' file inside the html/backups/ folder. @param openbazaar_installation_path: str The path to OpenBazaar's installation folder, where the db/ folder lives. @param backup_folder_path: str The folder where the backup file will reside. Optional callback functions can be passed: @param on_success_callback(backupFilePath: str) @param on_error_callback(errorMessage: str) """ date_time = time.strftime('%Y-%h-%d-%H-%M-%S') output_file_path = os.path.join( backup_folder_path, "openbazaar-%s.tar.gz" % date_time ) # Create the folder for the backup, if it doesn't exist. try: os.makedirs(backup_folder_path) except os.error: pass db_folder = os.path.join(openbazaar_installation_path, "db") try: with tarfile.open(output_file_path, "w:gz") as tar: tar.add(db_folder, arcname=os.path.basename(db_folder)) except tarfile.TarError as exc: # TODO: Install proper error logging. print "Error while backing up to:", output_file_path if on_error_callback is not None: on_error_callback(exc) return if on_success_callback is not None: on_success_callback(output_file_path) @staticmethod def restore(backup_tar_filepath): raise NotImplementedError @staticmethod def get_installation_path(): """Return the Project Root path.""" file_abs_path = os.path.abspath(__file__) real_file_abs_path = os.path.realpath(file_abs_path) return real_file_abs_path[:real_file_abs_path.find('/node')] @classmethod def get_backup_path(cls): """Return the backup path.""" # TODO: Make backup path configurable on server settings. return os.path.join( cls.get_installation_path(), 'html', 'backups' ) class Backup(json.JSONEncoder): """ A (meant to be immutable) POPO to represent a backup. So that we can tell our Web client about the backups available. """ def __init__(self, file_name=None, full_file_path=None, created_timestamp_millis=None, size_in_bytes=None): super(Backup, self).__init__() self.file_name = file_name self.full_file_path = full_file_path self.created_timestamp_millis = created_timestamp_millis self.size_in_bytes = size_in_bytes def to_dict(self): """Return a dictionary with attributes of self.""" return { "file_name": self.file_name, "full_file_path": self.full_file_path, "created_timestamp_millis": self.created_timestamp_millis, "size_in_bytes": self.size_in_bytes } def __repr__(self): return repr(self.to_dict()) @classmethod def get_backups(cls, backup_folder_path=None): """ Return a list of Backup objects found in the backup folder path given. """ if backup_folder_path is None or not os.path.isdir(backup_folder_path): return [] result_gen = ( cls.get_backup(os.path.join(backup_folder_path, x)) for x in os.listdir(backup_folder_path) ) result = [backup for backup in result_gen if backup is not None] result.reverse() return result @classmethod def get_backup(cls, backup_file_path): """ Create and return a Backup object from a backup path. Return None if the path was invalid. 
""" try: file_stat = os.stat(backup_file_path) file_name = os.path.basename(backup_file_path) except os.error: print "Invalid backup path:", backup_file_path return None created_timestamp_millis = file_stat.st_ctime size_in_bytes = file_stat.st_size return cls( file_name=file_name, full_file_path=backup_file_path, created_timestamp_millis=created_timestamp_millis, size_in_bytes=size_in_bytes ) class BackupJSONEncoder(json.JSONEncoder): # pylint: disable=method-hidden def default(self, o): if isinstance(o, Backup): return o.to_dict()
atsuyim/OpenBazaar
node/backuptool.py
Python
mit
4,817
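# Illustrative sketch: driving BackupTool and Backup from the record above.
# The import path follows the record's file path and assumes the project root
# is on sys.path; the callbacks just print.
import sys

from node.backuptool import Backup, BackupTool


def on_ok(path):
    print "backup written to", path


def on_fail(err):
    print >> sys.stderr, "backup failed:", err


BackupTool.backup(BackupTool.get_installation_path(),
                  BackupTool.get_backup_path(),
                  on_success_callback=on_ok,
                  on_error_callback=on_fail)

for b in Backup.get_backups(BackupTool.get_backup_path()):
    print b.file_name, b.size_in_bytes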
uname = ParseFunction('uname -a > {OUT}')

for group in ('disc', 'ccl', 'gh'):
    batch_options = 'requirements = MachineGroup == "{0}"'.format(group)
    uname(outputs='uname.{0}'.format(group),
          environment={'BATCH_OPTIONS': batch_options})

#for group in ('disc', 'ccl', 'gh'):
#    with Options(batch='requirements = MachineGroup == "{0}"'.format(group)):
#        uname(outputs='uname.{0}'.format(group))
isanwong/cctools
weaver/src/examples/batch.py
Python
gpl-2.0
410
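# Illustrative sketch in the same Weaver style as the record above: wrap
# another shell command with ParseFunction and fan it out over several
# outputs. The {IN}/{OUT} placeholders and the keyword-call form mirror the
# record; the inputs= keyword and the file names are assumptions.
count = ParseFunction('wc -l {IN} > {OUT}')

for name in ('alpha.txt', 'beta.txt'):
    count(inputs=name, outputs='{0}.count'.format(name))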
""" This module hosts all the extension functions and classes created via SDK. The function :py:func:`ext_import` is used to import a toolkit module (shared library) into the workspace. The shared library can be directly imported from a remote source, e.g. http, s3, or hdfs. The imported module will be under namespace `graphlab.extensions`. Alternatively, if the shared library is local, it can be directly imported using the python import statement. Note that graphlab must be imported first. """ ''' Copyright (C) 2015 Dato, Inc. All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the DATO-PYTHON-LICENSE file for details. ''' # This is a fake meta namespace which contains toolkit functions and toolkit # models implemented as extensions in C++ import graphlab as _gl import types as _types from graphlab.util import _make_internal_url from graphlab.cython.cy_sframe import UnitySFrameProxy as _UnitySFrameProxy from graphlab.cython.cy_sarray import UnitySArrayProxy as _UnitySArrayProxy from graphlab.cython.cy_graph import UnityGraphProxy as _UnityGraphProxy from graphlab.cython.cy_model import UnityModel as _UnityModel from graphlab.toolkits._main import ToolkitError as _ToolkitError from graphlab.cython.context import debug_trace as cython_context # Now. a bit of magic hackery is going to happen to this module. # This module is going to be first imported as graphlab.extensions # After which, inside graphlab/__init__.py, sys.modules['graphlab.extensions'] # will be modified to become a class called _extension_wrapper which redirects # getattr calls into this module. # # The reason for this wrapping is so that uses of functions in gl.extensions # (for instance) # # import graphlab as gl # gl.extensions._demo_addone(5) # # This will normally not work because gl.extensions._publish() was not called # hence _demo_addone will not be found. # # By wrapping the extensions module in another class, we can redefine # __getattr__ on that class and have it force gl.extensions._publish() when # an attribute name is not found. # # However, there are some odd sideeffects due to the use of the metapath # system as well. the metapath importer (this module) is going to look in # gl.extensions, but gl.extensions is going poke this module hence resulting # in an interesting recursive relationship. # # Also, we need gl.extensions.__dict__ to have all the published information # so that tab completion in ipython works. # # The result is that we need gl.extensions._publish() to publish into both # places. # - the current module # - the gl.extensions wrapper # # Then the metapath importer (this module) will just need to look in this # module, breaking the recursive relation. And the gl.extensions wrapper will # have all the stuff in it for tab completion by IPython. import sys as _sys _thismodule = _sys.modules[__name__] class_uid_to_class = {} def _wrap_function_return(val): """ Recursively walks each thing in val, opening lists and dictionaries, converting all occurances of UnityGraphProxy to an SGraph, UnitySFrameProxy to SFrame, and UnitySArrayProxy to SArray. 
""" if type(val) == _UnityGraphProxy: return _gl.SGraph(_proxy = val) elif type(val) == _UnitySFrameProxy: return _gl.SFrame(_proxy = val) elif type(val) == _UnitySArrayProxy: return _gl.SArray(_proxy = val) elif type(val) == _UnityModel: # we need to cast it up to the appropriate type try: if '__uid__' in val.list_fields(): uid = val.get('__uid__') if uid in class_uid_to_class: return class_uid_to_class[uid](_proxy=val) except: pass return val elif type(val) == list: return [_wrap_function_return(i) for i in val] elif type(val) == dict: return {i:_wrap_function_return(val[i]) for i in val} else: return val def _setattr_wrapper(mod, key, value): """ A setattr wrapper call used only by _publish(). This ensures that anything published into this module is also published into gl.extensions """ setattr(mod, key, value) if mod == _thismodule: setattr(_sys.modules[__name__], key, value) def _translate_function_arguments(argument): import inspect if inspect.isfunction(argument): try: return _build_native_function_call(argument) except: raise TypeError("Only native functions, or simple lambdas of native functions (with constant capture values) can be passed to an extension function.") elif type(argument) is list: return [_translate_function_arguments(i) for i in argument] elif type(argument) is tuple: return [_translate_function_arguments(i) for i in argument] elif type(argument) is dict: return {i:_translate_function_arguments(v) for (i, v) in argument.iteritems()} elif hasattr(argument, '_tkclass') and hasattr(argument, '__glmeta__'): return argument._tkclass else: return argument def _run_toolkit_function(fnname, arguments, args, kwargs): """ Dispatches arguments to a toolkit function. Parameters ---------- fnname : string The toolkit function to run arguments : list[string] The list of all the arguments the function takes. args : list The arguments that were passed kwargs : dictionary The keyword arguments that were passed """ # scan for all the arguments in args num_args_got = len(args) + len(kwargs) num_args_required = len(arguments) if num_args_got != num_args_required: raise TypeError("Expecting " + str(num_args_required) + " arguments, got " + str(num_args_got)) ## fill the dict first with the regular args argument_dict = {} for i in range(len(args)): argument_dict[arguments[i]] = args[i] # now fill with the kwargs. for k in kwargs.keys(): if k in argument_dict: raise TypeError("Got multiple values for keyword argument '" + k + "'") argument_dict[k] = kwargs[k] argument_dict = _translate_function_arguments(argument_dict) # unwrap it with cython_context(): ret = _gl.connect.main.get_unity().run_toolkit(fnname, argument_dict) # handle errors if ret[0] != True: if len(ret[1]) > 0: raise _ToolkitError(ret[1]) else: raise _ToolkitError("Toolkit failed with unknown error") ret = _wrap_function_return(ret[2]) if type(ret) == dict and 'return_value' in ret: return ret['return_value'] else: return ret def _make_injected_function(fn, arguments): return lambda *args, **kwargs: _run_toolkit_function(fn, arguments, args, kwargs) def _class_instance_from_name(class_name, *arg, **kwarg): """ class_name is of the form modA.modB.modC.class module_path splits on "." and the import_path is then ['modA','modB','modC'] the __import__ call is really annoying but essentially it reads like: import class from modA.modB.modC - Then the module variable points to modC - Then you get the class from the module. 
""" # we first look in gl.extensions for the class name module_path = class_name.split('.') import_path = module_path[0:-1] module = __import__('.'.join(import_path), fromlist=[module_path[-1]]) class_ = getattr(module, module_path[-1]) instance = class_(*arg, **kwarg) return instance def _create_class_instance(class_name, _proxy): """ Look for the class in graphlab.extensions in case it has already been imported (perhaps as a builtin extensions hard compiled into unity_server). """ try: return _class_instance_from_name("graphlab.extensions." + class_name, _proxy=_proxy) except: pass return _class_instance_from_name(class_name, _proxy=_proxy) class _ToolkitClass: """ The actual class class that is rewritten to become each user defined toolkit class. Certain care with attributes (__getattr__ / __setattr__) has to be done to inject functions, and attributes into their appropriate places. """ _functions = {} # The functions in the class _get_properties = [] # The getable properties in the class _set_properties = [] # The setable properties in the class _tkclass = None def __init__(self, *args, **kwargs): tkclass_name = getattr(self.__init__, "tkclass_name") _proxy = None if "_proxy" in kwargs: _proxy = kwargs['_proxy'] del kwargs['_proxy'] if _proxy: self.__dict__['_tkclass'] = _proxy elif tkclass_name: self.__dict__['_tkclass'] = _gl.connect.main.get_unity().create_toolkit_class(tkclass_name) try: # fill the functions and properties self.__dict__['_functions'] = self._tkclass.get('list_functions') self.__dict__['_get_properties'] = self._tkclass.get('list_get_properties') self.__dict__['_set_properties'] = self._tkclass.get('list_set_properties') # rewrite the doc string for this class try: self.__dict__['__doc__'] = self._tkclass.get('get_docstring', {'__symbol__':'__doc__'}) self.__class__.__dict__['__doc__'] = self.__dict__['__doc__'] except: pass except: raise _ToolkitError("Cannot create Toolkit Class for this class. " "This class was not created with the new toolkit class system.") # for compatibility with older classes / models self.__dict__['__proxy__'] = self.__dict__['_tkclass'] if '__init__' in self.__dict__['_functions']: self.__run_class_function("__init__", args, kwargs) elif len(args) != 0 or len(kwargs) != 0: raise TypeError("This constructor takes no arguments") def _get_wrapper(self): gl_meta_value = self.__glmeta__['extension_name'] return lambda _proxy: _create_class_instance(gl_meta_value, _proxy) def __dir__(self): return self._functions.keys() + self._get_properties + self._set_properties def __run_class_function(self, fnname, args, kwargs): # scan for all the arguments in args arguments = self._functions[fnname] num_args_got = len(args) + len(kwargs) num_args_required = len(arguments) if num_args_got != num_args_required: raise TypeError("Expecting " + str(num_args_required) + " arguments, got " + str(num_args_got)) ## fill the dict first with the regular args argument_dict = {} for i in range(len(args)): argument_dict[arguments[i]] = args[i] # now fill with the kwargs. for k in kwargs.keys(): if k in argument_dict: raise TypeError("Got multiple values for keyword argument '" + k + "'") argument_dict[k] = kwargs[k] # unwrap it argument_dict['__function_name__'] = fnname ret = self._tkclass.get('call_function', argument_dict) ret = _wrap_function_return(ret) return ret def __getattr__(self, name): if name == '__proxy__': return self.__dict__['__proxy__'] elif name in self._get_properties: # is it an attribute? 
arguments = {'__property_name__':name} return _wrap_function_return(self._tkclass.get('get_property', arguments)) elif name in self._functions: # is it a function? ret = lambda *args, **kwargs: self.__run_class_function(name, args, kwargs) ret.__doc__ = "Name: " + name + "\nParameters: " + str(self._functions[name]) + "\n" try: ret.__doc__ += self._tkclass.get('get_docstring', {'__symbol__':name}) ret.__doc__ += '\n' except: pass return ret else: raise AttributeError("no attribute " + name) def __setattr__(self, name, value): if name == '__proxy__': self.__dict__['__proxy__'] = value elif name in self._set_properties: # is it a setable property? arguments = {'__property_name__':name, 'value':value} return _wrap_function_return(self._tkclass.get('set_property', arguments)) else: raise AttributeError("no attribute " + name) def _list_functions(): """ Lists all the functions registered in unity_server. """ unity = _gl.connect.main.get_unity() return unity.list_toolkit_functions() def _publish(): import sys import copy """ Publishes all functions and classes registered in unity_server. The functions and classes will appear in the module graphlab.extensions """ unity = _gl.connect.main.get_unity() fnlist = unity.list_toolkit_functions() # Loop through all the functions and inject it into # graphlab.extensions.[blah] # Note that [blah] may be somemodule.somefunction # and so the injection has to be # graphlab.extensions.somemodule.somefunction for fn in fnlist: props = unity.describe_toolkit_function(fn) # quit if there is nothing we can process if 'arguments' not in props: continue arguments = props['arguments'] newfunc = _make_injected_function(fn, arguments) newfunc.__doc__ = "Name: " + fn + "\nParameters: " + str(arguments) + "\n" if 'documentation' in props: newfunc.__doc__ += props['documentation'] + "\n" newfunc.__dict__['__glmeta__'] = {'extension_name':fn} modpath = fn.split('.') # walk the module tree mod = _thismodule for path in modpath[:-1]: try: getattr(mod, path) except: _setattr_wrapper(mod, path, _types.ModuleType(name=path)) mod = getattr(mod, path) _setattr_wrapper(mod, modpath[-1], newfunc) # Repeat for classes tkclasslist = unity.list_toolkit_classes() for tkclass in tkclasslist: pathpos = tkclass.split('.') m = unity.describe_toolkit_class(tkclass) # of v2 type if not ('functions' in m and 'get_properties' in m and 'set_properties' in m and 'uid' in m): continue # create a new class new_class = copy.deepcopy(_ToolkitClass.__dict__) # rewrite the init method to add the toolkit class name so it will # default construct correctly new_class['__init__'] = _types.FunctionType(new_class['__init__'].func_code, new_class['__init__'].func_globals, name='__init__', argdefs=(), closure=()) new_class['__init__'].tkclass_name = tkclass newclass = _types.ClassType(tkclass, (object,), new_class) setattr(newclass, '__glmeta__', {'extension_name':tkclass}) class_uid_to_class[m['uid']] = newclass modpath = tkclass.split('.') # walk the module tree mod = _thismodule for path in modpath[:-1]: try: getattr(mod, path) except: _setattr_wrapper(mod, path, _types.ModuleType(name=path)) mod = getattr(mod, path) _setattr_wrapper(mod, modpath[-1], newclass) class _ExtMetaPath(object): """ This is a magic metapath searcher. To understand how this works, See the PEP 302 document. Essentially this class is inserted into the sys.meta_path list. This class must implement find_module() and load_module(). 
After which, this class is called first when any particular module import was requested, allowing this to essentially 'override' the default import behaviors. """ def find_module(self, fullname, submodule_path=None): """ We have to see if fullname refers to a module we can import. Some care is needed here because: import xxx # tries to load xxx.so from any of the python import paths import aaa.bbb.xxx # tries to load aaa/bbb/xxx.so from any of the python import paths """ # first see if we have this particular so has been loaded by # graphlab's extension library before ret = self.try_find_module(fullname, submodule_path) if ret is not None: return ret # nope. has not been loaded before # lets try to find a ".so" or a ".dylib" if any of the python # locations import sys import os # This drops the last "." So if I am importing aaa.bbb.xxx # module_subpath is aaa.bbb module_subpath = ".".join(fullname.split('.')[:-1]) for path in sys.path: # joins the path to aaa/bbb/xxx pathname = os.path.join(path, os.sep.join(fullname.split('.'))) # try to laod the ".so" extension try: if os.path.exists(pathname + '.so'): ext_import(pathname + '.so', module_subpath) break except: pass # try to laod the ".dylib" extension try: if os.path.exists(pathname + '.dylib'): ext_import(pathname + '.dylib', module_subpath) break except: pass ret = self.try_find_module(fullname, submodule_path) if ret is not None: return ret def try_find_module(self, fullname, submodule_path=None): # check if the so has been loaded before import sys # try to find the module inside of gl.extensions # Essentially: if fullname == aaa.bbb.xxx # Then we try to see if we have loaded gl.extensions.aaa.bbb.xxx mod = _thismodule modpath = fullname.split('.') # walk the module tree mod = _thismodule for path in modpath: try: mod = getattr(mod, path) except: return None return self def load_module(self, fullname): import sys # we may have already been loaded if fullname in sys.modules: return sys.modules[fullname] # try to find the module inside of gl.extensions # Essentially: if fullname == aaa.bbb.xxx # Then we try to look for gl.extensions.aaa.bbb.xxx mod = _thismodule modpath = fullname.split('.') for path in modpath: mod = getattr(mod, path) # Inject the module into aaa.bbb.xxx mod.__loader__ = self mod.__package__ = fullname mod.__name__ = fullname sys.modules[fullname] = mod return mod _ext_meta_path_singleton = None def _add_meta_path(): """ called on unity_server import to insert the meta path loader. """ import sys global _ext_meta_path_singleton if _ext_meta_path_singleton == None: _ext_meta_path_singleton = _ExtMetaPath() sys.meta_path += [_ext_meta_path_singleton] def ext_import(soname, module_subpath=""): """ Loads a graphlab toolkit module (a shared library) into the gl.extensions namespace. Toolkit module created via SDK can either be directly imported, e.g. ``import example`` or via this function, e.g. ``graphlab.ext_import("example.so")``. Use ``ext_import`` when you need more namespace control, or when the shared library is not local, e.g. in http, s3 or hdfs. Parameters ---------- soname : string The filename of the shared library to load. This can be a URL, or a HDFS location. For instance if soname is somewhere/outthere/toolkit.so The functions in toolkit.so will appear in gl.extensions.toolkit.* module_subpath : string, optional Any additional module paths to prepend to the toolkit module after it is imported. 
For instance if soname is somewhere/outthere/toolkit.so, by default the functions in toolkit.so will appear in gl.extensions.toolkit.*. However, if I module_subpath="somewhere.outthere", the functions in toolkit.so will appear in gl.extensions.somewhere.outthere.toolkit.* Returns ------- out : a list of functions and classes loaded. Examples -------- For instance, given a module which implements the function "square_root", .. code-block:: c++ #include <cmath> #include <graphlab/sdk/toolkit_function_macros.hpp> double square_root(double a) { return sqrt(a); } BEGIN_FUNCTION_REGISTRATION REGISTER_FUNCTION(square_root, "a"); END_FUNCTION_REGISTRATION compiled into example.so >>> graphlab.ext_import('example1.so') ['example1.square_root'] >>> graphlab.extensions.example1.square_root(9) 3.0 We can customize the import location with module_subpath which can be used to avoid namespace conflicts when you have multiple toolkits with the same filename. >>> graphlab.ext_import('example1.so', 'math') ['math.example1.square_root'] >>> graphlab.extensions.math.example1.square_root(9) 3.0 The module can also be imported directly, but graphlab *must* be imported first. graphlab will intercept the module loading process to load the toolkit. >>> import graphlab >>> import example1 #searches for example1.so in all the python paths >>> example1.square_root(9) 3.0 """ unity = _gl.connect.main.get_unity() import os if os.path.exists(soname): soname = os.path.abspath(soname) else: soname = _make_internal_url(soname) ret = unity.load_toolkit(soname, module_subpath) if len(ret) > 0: raise RuntimeError(ret) _publish() # push the functions into the corresponding module namespace filename = os.path.basename(soname) modulename = filename.split('.')[0] return unity.list_toolkit_functions_in_dynamic_module(soname) + unity.list_toolkit_classes_in_dynamic_module(soname) def _get_toolkit_function_name_from_function(fn): """ If fn is a toolkit function either imported by graphlab.extensions.ext_import or the magic import system, we return the name of toolkit function. Otherwise we return an empty string. """ try: if '__glmeta__' in fn.__dict__: return fn.__dict__['__glmeta__']['extension_name'] else: return "" except: return "" def _get_argument_list_from_toolkit_function_name(fn): """ Given a toolkit function name, return the argument list """ unity = _gl.connect.main.get_unity() fnprops = unity.describe_toolkit_function(fn) argnames = fnprops['arguments'] return argnames class _Closure: """ Defines a closure class describing a lambda closure. Contains 2 fields: native_fn_name: The toolkit native function name arguments: An array of the same length as the toolkit native function. Each array element is an array of 2 elements [is_capture, value] If is_capture == 1: value contains the captured value If is_capture == 0: value contains a number denoting the lambda argument position. Example: lambda x, y: fn(10, x, x, y) Then arguments will be [1, 10], --> is captured value. has value 10 [0, 0], --> is not captured value. is argument 0 of the lambda. [0, 0], --> is not captured value. is argument 0 of the lambda. [0, 1] --> is not captured value. is argument 1 of the lambda. """ def __init__(self, native_fn_name, arguments): self.native_fn_name = native_fn_name self.arguments = arguments def _descend_namespace(caller_globals, name): """ Given a globals dictionary, and a name of the form "a.b.c.d", recursively walk the globals expanding caller_globals['a']['b']['c']['d'] returning the result. 
Raises an exception (IndexError) on failure. """ names = name.split('.') cur = caller_globals for i in names: if type(cur) is dict: cur = cur[i] else: cur = getattr(cur, i) return cur def _build_native_function_call(fn): """ If fn can be interpreted and handled as a native function: i.e. fn is one of the extensions, or fn is a simple lambda closure using one of the extensions. fn = gl.extensions.add fn = lambda x: gl.extensions.add(5) Then, this returns a closure object, which describes the function call which can then be passed to C++. Returns a _Closure object on success, raises an exception on failure. """ # See if fn is the native function itself native_function_name = _get_toolkit_function_name_from_function(fn) if native_function_name != "": # yup! # generate an "identity" argument list argnames = _get_argument_list_from_toolkit_function_name(native_function_name) arglist = [[0, i] for i in range(len(argnames))] return _Closure(native_function_name, arglist) # ok. its not a native function from graphlab_util.lambda_closure_capture import translate from graphlab_util.lambda_closure_capture import Parameter # Lets see if it is a simple lambda capture = translate(fn) # ok. build up the closure arguments # Try to pick up the lambda function = _descend_namespace(capture.caller_globals, capture.closure_fn_name) native_function_name = _get_toolkit_function_name_from_function(function) if native_function_name == "": raise RuntimeError("Lambda does not contain a native function") argnames = _get_argument_list_from_toolkit_function_name(native_function_name) # ok. build up the argument list. this is mildly annoying due to the mix of # positional and named arguments # make an argument list with a placeholder for everything first arglist = [[-1, i] for i in argnames] # loop through the positional arguments for i in range(len(capture.positional_args)): arg = capture.positional_args[i] if type(arg) is Parameter: # This is a lambda argument # arg.name is the actual string of the argument # here we need the index arglist[i] = [0, capture.input_arg_names.index(arg.name)] else: # this is a captured value arglist[i] = [1, arg] # now. the named arguments are somewhat annoying for i in capture.named_args: arg = capture.named_args[i] if type(arg) is Parameter: # This is a lambda argument # arg.name is the actual string of the argument # here we need the index arglist[argnames.index(i)] = [0, capture.input_arg_names.index(arg.name)] else: # this is a captured value arglist[argnames.index(i)] = [1, arg] # done. Make sure all arguments are filled for i in arglist: if i[0] == -1: raise RuntimeError("Incomplete function specification") # attempt to recursively break down any other functions import inspect for i in range(len(arglist)): if arglist[i][0] == 1 and inspect.isfunction(arglist[i][1]): try: arglist[i][1] = _build_native_function_call(arglist[i][1]) except: pass return _Closure(native_function_name, arglist)
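The argument encoding used by _Closure (documented above) is easiest to see on the docstring's own example, ``lambda x, y: fn(10, x, x, y)``. Below is an illustrative sketch, not part of the original module: the toolkit function name "graphlab.extensions.fn" is hypothetical, and _build_native_function_call would produce an equivalent object when fn is a registered native function.

# Hypothetical sketch: hand-building the closure that _build_native_function_call
# derives for `lambda x, y: fn(10, x, x, y)` when fn is a native toolkit function.
example_closure = _Closure(
    "graphlab.extensions.fn",      # hypothetical native function name
    [[1, 10],                      # is_capture == 1: captured constant 10
     [0, 0],                       # is_capture == 0: lambda argument 0 (x)
     [0, 0],                       # lambda argument 0 (x) again
     [0, 1]])                      # lambda argument 1 (y)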
ypkang/Dato-Core
src/unity/python/graphlab/extensions.py
Python
agpl-3.0
27,795
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    ''' Ansible module for a gcloud IAM service account '''
    module = AnsibleModule(
        argument_spec=dict(
            # credentials
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            name=dict(default=None, type='str'),
            display_name=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
    )

    gcloud = GcloudIAMServiceAccount(module.params['name'],
                                     module.params['display_name'])

    state = module.params['state']

    api_rval = gcloud.list_service_accounts()

    #####
    # Get
    #####
    if state == 'list':
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval, state="list")

        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        if gcloud.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')

            api_rval = gcloud.delete_service_account()

            module.exit_json(changed=True, results=api_rval, state="absent")

        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        if not gcloud.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')

            # Create it here
            api_rval = gcloud.create_service_account()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        # update
        elif gcloud.needs_update():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed an update.')

            api_rval = gcloud.update_service_account()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present|update")

        module.exit_json(changed=False, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")

# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *

main()
appuio/ansible-role-openshift-zabbix-monitoring
vendor/openshift-tools/ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa.py
Python
apache-2.0
2,675
# encoding: UTF-8 import os class Constant: conf_dir = os.path.join(os.path.expanduser('~'), '.netease-musicbox') download_dir = conf_dir + "/cached"
smileboywtu/LTCodeSerialDecoder
netease/const.py
Python
apache-2.0
163
# Copyright (C) Mesosphere, Inc. See LICENSE file for details. """MesosDNS mock endpoint""" import copy import logging import re from exceptions import EndpointException from mocker.endpoints.recording import ( RecordingHTTPRequestHandler, RecordingTcpIpEndpoint, ) # pylint: disable=C0103 log = logging.getLogger(__name__) # pylint: disable=R0903 class MesosDnsHTTPRequestHandler(RecordingHTTPRequestHandler): """Request handler that mimics MesosDNS Depending on how it was set up, it will respond with different SRV entries for preset services. """ SRV_QUERY_REGEXP = re.compile('^/v1/services/_([^_]+)._tcp.marathon.mesos$') def _calculate_response(self, base_path, url_args, body_args=None): """Reply with the currently set mock-reply for given SRV record query. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments and return value of this method. Raises: EndpointException: request URL path is unsupported """ if base_path == '/v1/reflect/me': # A test URI that is used by tests. In some cases it is impossible # to reuse SRV record path. return self._reflect_request(base_path, url_args, body_args) match = self.SRV_QUERY_REGEXP.search(base_path) if match: return self.__srv_permissions_request_handler(match.group(1)) raise EndpointException( code=500, content="Path `{}` is not supported yet".format(base_path)) def __srv_permissions_request_handler(self, srvid): """Calculate reply for given service-ID Arguments: srvid (string): service ID to reply to """ ctx = self.server.context if srvid not in ctx.data['services']: raise EndpointException( code=500, content="Service `{}` is unknown".format(srvid)) blob = self._convert_data_to_blob(ctx.data['services'][srvid]) return 200, 'application/json', blob def create_srv_entry(srv_name, ip, port): """Create a SRV entry based on the supplied data Arguments: srv_name (string): service ID that the new SRV-entry should represent port (string): TCP/IP port that the new agent should pretend to listen on ip (string): IP address that the new agent hould pretend to listen on Returns: SRV entry dict mimicing the one returned by MesosDNS """ res = {} res['service'] = "_{}._tcp.marathon.mesos".format(srv_name) res['host'] = "{}-74b1w-s1.marathon.mesos.".format(srv_name) res['ip'] = ip res['port'] = port return res EMPTY_SRV = { "scheduler-alwaysthere": [ { "service": "", "host": "", "ip": "", "port": "", } ], } SCHEDULER_SRV_ALWAYSTHERE = { "scheduler-alwaysthere": [ create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16000), create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002), ], } SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT = { "scheduler-alwaysthere": [ create_srv_entry("scheduler-alwaysthere", "127.0.0.15", 16001), create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002), ], } SCHEDULER_SRV_ALWAYSTHERE_NEST1 = { "scheduler-alwaysthere.nest1.nest2": [ create_srv_entry("scheduler-alwaysthere.nest1.nest2", "127.0.0.1", 18000), create_srv_entry("scheduler-alwaysthere.nest1.nest2", "127.0.0.1", 16002), ], } SCHEDULER_SRV_ALWAYSTHERE_NEST2 = { "scheduler-alwaysthere.nest1": [ create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 17000), create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 16002), ], } SCHEDULER_SRV_ONLYMESOSDNS_NEST2 = { "scheduler-onlymesosdns.nest1.nest2": [ create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 18003), create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 16002), ], } INITIAL_SRVDATA = {} INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE) 
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST1) INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST2) INITIAL_SRVDATA.update(SCHEDULER_SRV_ONLYMESOSDNS_NEST2) # pylint: disable=R0903,C0103 class MesosDnsEndpoint(RecordingTcpIpEndpoint): """An endpoint that mimics DC/OS MesosDNS""" def __init__(self, port, ip=''): super().__init__(port, ip, MesosDnsHTTPRequestHandler) self.__context_init() def reset(self, *_): """Reset the endpoint to the default/initial state.""" with self._context.lock: super().reset() self.__context_init() def set_srv_response(self, srvs): """Change the endpoint output so that it responds with a non-default MesosDNS srv node. """ with self._context.lock: self._context.data["services"] = srvs def __context_init(self): """Helper function meant to initialize all the data relevant to this particular type of endpoint""" self._context.data["services"] = copy.deepcopy(INITIAL_SRVDATA)
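For reference, a small illustrative example (not part of the mock itself) of the SRV entry dict that create_srv_entry builds; the service name, IP and port below are arbitrary values.

# Illustrative only: the dict returned by create_srv_entry() for made-up inputs.
entry = create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16000)
assert entry == {
    'service': '_scheduler-alwaysthere._tcp.marathon.mesos',
    'host': 'scheduler-alwaysthere-74b1w-s1.marathon.mesos.',
    'ip': '127.0.0.1',
    'port': 16000,
}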
asridharan/dcos
packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/mesos_dns.py
Python
apache-2.0
5,228
from os.path import dirname, join from math import ceil import numpy as np from bokeh.io import curdoc from bokeh.layouts import row, column, widgetbox from bokeh.models import ColumnDataSource, Slider, Div from bokeh.plotting import figure import audio from audio import MAX_FREQ, TIMESLICE, NUM_BINS from waterfall import WaterfallRenderer MAX_FREQ_KHZ = MAX_FREQ*0.001 NUM_GRAMS = 800 GRAM_LENGTH = 512 TILE_WIDTH = 200 EQ_CLAMP = 20 PALETTE = ['#081d58', '#253494', '#225ea8', '#1d91c0', '#41b6c4', '#7fcdbb', '#c7e9b4', '#edf8b1', '#ffffd9'] PLOTARGS = dict(tools="", toolbar_location=None, outline_line_color='#595959') filename = join(dirname(__file__), "description.html") desc = Div(text=open(filename).read(), render_as_text=False, width=1000) waterfall_renderer = WaterfallRenderer(palette=PALETTE, num_grams=NUM_GRAMS, gram_length=GRAM_LENGTH, tile_width=TILE_WIDTH) waterfall_plot = figure(plot_width=990, plot_height=300, min_border_left=80, x_range=[0, NUM_GRAMS], y_range=[0, MAX_FREQ_KHZ], **PLOTARGS) waterfall_plot.grid.grid_line_color = None waterfall_plot.background_fill_color = "#024768" waterfall_plot.renderers.append(waterfall_renderer) signal_source = ColumnDataSource(data=dict(t=[], y=[])) signal_plot = figure(plot_width=600, plot_height=200, title="Signal", x_range=[0, TIMESLICE], y_range=[-0.8, 0.8], **PLOTARGS) signal_plot.background_fill_color = "#eaeaea" signal_plot.line(x="t", y="y", line_color="#024768", source=signal_source) spectrum_source = ColumnDataSource(data=dict(f=[], y=[])) spectrum_plot = figure(plot_width=600, plot_height=200, title="Power Spectrum", y_range=[10**(-4), 10**3], x_range=[0, MAX_FREQ_KHZ], y_axis_type="log", **PLOTARGS) spectrum_plot.background_fill_color = "#eaeaea" spectrum_plot.line(x="f", y="y", line_color="#024768", source=spectrum_source) eq_angle = 2*np.pi/NUM_BINS eq_range = np.arange(EQ_CLAMP, dtype=np.float64) eq_data = dict( inner=np.tile(eq_range+2, NUM_BINS), outer=np.tile(eq_range+2.95, NUM_BINS), start=np.hstack([np.ones_like(eq_range)*eq_angle*(i+0.05) for i in range(NUM_BINS)]), end=np.hstack([np.ones_like(eq_range)*eq_angle*(i+0.95) for i in range(NUM_BINS)]), alpha=np.tile(np.zeros_like(eq_range), NUM_BINS), ) eq_source = ColumnDataSource(data=eq_data) eq = figure(plot_width=400, plot_height=400, x_axis_type=None, y_axis_type=None, x_range=[-20, 20], y_range=[-20, 20], **PLOTARGS) eq.background_fill_color = "#eaeaea" eq.annular_wedge(x=0, y=0, fill_color="#024768", fill_alpha="alpha", line_color=None, inner_radius="inner", outer_radius="outer", start_angle="start", end_angle="end", source=eq_source) freq = Slider(start=1, end=MAX_FREQ, value=MAX_FREQ, step=1, title="Frequency") gain = Slider(start=1, end=20, value=1, step=1, title="Gain") def update(): signal, spectrum, bins = audio.data['values'] # seems to be a problem with Array property, using List for now waterfall_renderer.latest = spectrum.tolist() waterfall_plot.y_range.end = freq.value*0.001 # the if-elses below are small optimization: avoid computing and sending # all the x-values, if the length has not changed if len(signal) == len(signal_source.data['y']): signal_source.data['y'] = signal*gain.value else: t = np.linspace(0, TIMESLICE, len(signal)) signal_source.data = dict(t=t, y=signal*gain.value) if len(spectrum) == len(spectrum_source.data['y']): spectrum_source.data['y'] = spectrum else: f = np.linspace(0, MAX_FREQ_KHZ, len(spectrum)) spectrum_source.data = dict(f=f, y=spectrum) spectrum_plot.x_range.end = freq.value*0.001 alphas = [] for x in bins: a = 
np.zeros_like(eq_range) N = int(ceil(x)) a[:N] = (1 - eq_range[:N]*0.05) alphas.append(a) eq_source.data['alpha'] = np.hstack(alphas) curdoc().add_periodic_callback(update, 80) controls = row(widgetbox(gain), widgetbox(freq)) plots = column(waterfall_plot, row(column(signal_plot, spectrum_plot), eq)) curdoc().add_root(desc) curdoc().add_root(controls) curdoc().add_root(plots)
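The per-bin alpha ramp computed inside update() can be hard to read inline; here is a standalone sketch of the same arithmetic, using a made-up bin level.

# Standalone sketch of the equalizer alpha ramp from update(); x = 5.3 is a
# hypothetical bin level, with EQ_CLAMP segments per bin as defined above.
import numpy as np
from math import ceil

eq_range = np.arange(20, dtype=np.float64)   # EQ_CLAMP == 20
x = 5.3                                      # hypothetical bin level
a = np.zeros_like(eq_range)
N = int(ceil(x))                             # light the first ceil(x) segments
a[:N] = 1 - eq_range[:N] * 0.05              # fade 1.00, 0.95, 0.90, ...
# a[:6] -> [1.0, 0.95, 0.9, 0.85, 0.8, 0.75]; the remaining segments stay at 0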
percyfal/bokeh
examples/app/spectrogram/main.py
Python
bsd-3-clause
4,299
# # Copyright (c) 2008--2015 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # # # Product Names Import from importLib import Import class ProductNamesImport(Import): def __init__(self, batch, backend): Import.__init__(self, batch, backend) def preprocess(self): pass def fix(self): pass def submit(self): try: self.backend.processProductNames(self.batch) except: self.backend.rollback() raise self.backend.commit()
xkollar/spacewalk
backend/server/importlib/productNamesImport.py
Python
gpl-2.0
1,057
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018, Simon Dodsley ([email protected]) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: purefa_facts version_added: '2.6' deprecated: removed_in: '2.13' why: Deprecated in favor of C(_info) module. alternative: Use M(purefa_info) instead. short_description: Collect facts from Pure Storage FlashArray description: - Collect facts information from a Pure Storage Flasharray running the Purity//FA operating system. By default, the module will collect basic fact information including hosts, host groups, protection groups and volume counts. Additional fact information can be collected based on the configured set of arguments. author: - Pure Storage ansible Team (@sdodsley) <[email protected]> options: gather_subset: description: - When supplied, this argument will define the facts to be collected. Possible values for this include all, minimum, config, performance, capacity, network, subnet, interfaces, hgroups, pgroups, hosts, admins, volumes, snapshots, pods, vgroups, offload, apps and arrays. type: list required: false default: minimum extends_documentation_fragment: - purestorage.fa ''' EXAMPLES = r''' - name: collect default set of facts purefa_facts: fa_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: collect configuration and capacity facts purefa_facts: gather_subset: - config - capacity fa_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: collect all facts purefa_facts: gather_subset: - all fa_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 ''' RETURN = r''' ansible_facts: description: Returns the facts collected from the FlashArray returned: always type: complex sample: { "capacity": {}, "config": { "directory_service": { "array_admin_group": null, "base_dn": null, "bind_password": null, "bind_user": null, "check_peer": false, "enabled": false, "group_base": null, "readonly_group": null, "storage_admin_group": null, "uri": [] }, "dns": { "domain": "domain.com", "nameservers": [ "8.8.8.8", "8.8.4.4" ] }, "ntp": [ "0.ntp.pool.org", "1.ntp.pool.org", "2.ntp.pool.org", "3.ntp.pool.org" ], "smtp": [ { "enabled": true, "name": "[email protected]" }, { "enabled": true, "name": "[email protected]" } ], "snmp": [ { "auth_passphrase": null, "auth_protocol": null, "community": null, "host": "localhost", "name": "localhost", "privacy_passphrase": null, "privacy_protocol": null, "user": null, "version": "v2c" } ], "ssl_certs": { "country": null, "email": null, "issued_by": "", "issued_to": "", "key_size": 2048, "locality": null, "organization": "Acme Storage, Inc.", "organizational_unit": "Acme Storage, Inc.", "state": null, "status": "self-signed", "valid_from": "2017-08-11T23:09:06Z", "valid_to": "2027-08-09T23:09:06Z" }, "syslog": [] }, "default": { "array_name": "flasharray1", "connected_arrays": 1, "hostgroups": 0, "hosts": 10, "pods": 3, "protection_groups": 1, "purity_version": "5.0.4", "snapshots": 1, "volume_groups": 2 }, "hgroups": {}, "hosts": { "host1": { "hgroup": null, "iqn": [ "iqn.1994-05.com.redhat:2f6f5715a533" ], "wwn": [] }, "host2": { "hgroup": null, "iqn": [ "iqn.1994-05.com.redhat:d17fb13fe0b" ], "wwn": [] }, "host3": { "hgroup": null, "iqn": [ "iqn.1994-05.com.redhat:97b1351bfb2" ], "wwn": [] }, "host4": { 
"hgroup": null, "iqn": [ "iqn.1994-05.com.redhat:dd84e9a7b2cb" ], "wwn": [ "10000000C96C48D1", "10000000C96C48D2" ] } }, "interfaces": { "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682" }, "network": { "ct0.eth0": { "address": "10.10.10.10", "gateway": "10.10.10.1", "hwaddr": "ec:f4:bb:c8:8a:04", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "management" ], "speed": 1000000000 }, "ct0.eth2": { "address": "10.10.10.11", "gateway": null, "hwaddr": "ec:f4:bb:c8:8a:00", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "replication" ], "speed": 10000000000 }, "ct0.eth3": { "address": "10.10.10.12", "gateway": null, "hwaddr": "ec:f4:bb:c8:8a:02", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "replication" ], "speed": 10000000000 }, "ct0.eth4": { "address": "10.10.10.13", "gateway": null, "hwaddr": "90:e2:ba:83:79:0c", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "iscsi" ], "speed": 10000000000 }, "ct0.eth5": { "address": "10.10.10.14", "gateway": null, "hwaddr": "90:e2:ba:83:79:0d", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "iscsi" ], "speed": 10000000000 }, "vir0": { "address": "10.10.10.20", "gateway": "10.10.10.1", "hwaddr": "fe:ba:e9:e7:6b:0f", "mtu": 1500, "netmask": "255.255.255.0", "services": [ "management" ], "speed": 1000000000 } }, "offload": { "nfstarget": { "address": "10.0.2.53", "mount_options": null, "mount_point": "/offload", "protocol": "nfs", "status": "scanning" } }, "performance": { "input_per_sec": 8191, "output_per_sec": 0, "queue_depth": 1, "reads_per_sec": 0, "san_usec_per_write_op": 15, "usec_per_read_op": 0, "usec_per_write_op": 642, "writes_per_sec": 2 }, "pgroups": { "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": { "hgroups": null, "hosts": null, "source": "host1", "targets": null, "volumes": [ "volume-1" ] } }, "pods": { "srm-pod": { "arrays": [ { "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192", "mediator_status": "online", "name": "sn1-405-c09-37", "status": "online" }, { "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca", "mediator_status": "online", "name": "sn1-420-c11-31", "status": "online" } ], "source": null } }, "snapshots": { "consisgroup.cgsnapshot": { "created": "2018-03-28T09:34:02Z", "size": 13958643712, "source": "volume-1" } }, "subnet": {}, "vgroups": { "vvol--vSphere-HA-0ffc7dd1-vg": { "volumes": [ "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6" ] } }, "volumes": { "ansible_data": { "bandwidth": null, "hosts": [ [ "host1", 1 ] ], "serial": "43BE47C12334399B000114A6", "size": 1099511627776, "source": null } } } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pure import get_system, purefa_argument_spec ADMIN_API_VERSION = '1.14' S3_REQUIRED_API_VERSION = '1.16' LATENCY_REQUIRED_API_VERSION = '1.16' AC_REQUIRED_API_VERSION = '1.14' CAP_REQUIRED_API_VERSION = '1.6' SAN_REQUIRED_API_VERSION = '1.10' NVME_API_VERSION = '1.16' PREFERRED_API_VERSION = '1.15' CONN_STATUS_API_VERSION = '1.17' def generate_default_dict(array): default_facts = {} defaults = array.get() api_version = array._list_available_rest_versions() if AC_REQUIRED_API_VERSION in api_version: default_facts['volume_groups'] = len(array.list_vgroups()) default_facts['connected_arrays'] = len(array.list_array_connections()) default_facts['pods'] = 
len(array.list_pods()) default_facts['connection_key'] = array.get(connection_key=True)['connection_key'] hosts = array.list_hosts() admins = array.list_admins() snaps = array.list_volumes(snap=True, pending=True) pgroups = array.list_pgroups(pending=True) hgroups = array.list_hgroups() # Old FA arrays only report model from the primary controller ct0_model = array.get_hardware('CT0')['model'] if ct0_model: model = ct0_model else: ct1_model = array.get_hardware('CT1')['model'] model = ct1_model default_facts['array_model'] = model default_facts['array_name'] = defaults['array_name'] default_facts['purity_version'] = defaults['version'] default_facts['hosts'] = len(hosts) default_facts['snapshots'] = len(snaps) default_facts['protection_groups'] = len(pgroups) default_facts['hostgroups'] = len(hgroups) default_facts['admins'] = len(admins) return default_facts def generate_perf_dict(array): perf_facts = {} api_version = array._list_available_rest_versions() if LATENCY_REQUIRED_API_VERSION in api_version: latency_info = array.get(action='monitor', latency=True)[0] perf_info = array.get(action='monitor')[0] # IOPS perf_facts['writes_per_sec'] = perf_info['writes_per_sec'] perf_facts['reads_per_sec'] = perf_info['reads_per_sec'] # Bandwidth perf_facts['input_per_sec'] = perf_info['input_per_sec'] perf_facts['output_per_sec'] = perf_info['output_per_sec'] # Latency if LATENCY_REQUIRED_API_VERSION in api_version: perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op'] perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op'] perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op'] perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op'] perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op'] perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op'] perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op'] perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op'] perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op'] perf_facts['queue_depth'] = perf_info['queue_depth'] return perf_facts def generate_config_dict(array): config_facts = {} api_version = array._list_available_rest_versions() # DNS config_facts['dns'] = array.get_dns() # SMTP config_facts['smtp'] = array.list_alert_recipients() # SNMP config_facts['snmp'] = array.list_snmp_managers() config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id'] # DS config_facts['directory_service'] = array.get_directory_service() if S3_REQUIRED_API_VERSION in api_version: config_facts['directory_service_roles'] = {} roles = array.list_directory_service_roles() for role in range(0, len(roles)): role_name = roles[role]['name'] config_facts['directory_service_roles'][role_name] = { 'group': roles[role]['group'], 'group_base': roles[role]['group_base'], } else: config_facts['directory_service'].update(array.get_directory_service(groups=True)) # NTP config_facts['ntp'] = array.get(ntpserver=True)['ntpserver'] # SYSLOG config_facts['syslog'] = array.get(syslogserver=True)['syslogserver'] # Phonehome config_facts['phonehome'] = array.get(phonehome=True)['phonehome'] # Proxy config_facts['proxy'] = array.get(proxy=True)['proxy'] # Relay Host config_facts['relayhost'] = array.get(relayhost=True)['relayhost'] # Sender Domain config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain'] # SYSLOG config_facts['syslog'] = 
array.get(syslogserver=True)['syslogserver'] # Idle Timeout config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout'] # SCSI Timeout config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout'] # SSL config_facts['ssl_certs'] = array.get_certificate() # Global Admin settings if S3_REQUIRED_API_VERSION in api_version: config_facts['global_admin'] = array.get_global_admin_attributes() return config_facts def generate_admin_dict(array): api_version = array._list_available_rest_versions() admin_facts = {} if ADMIN_API_VERSION in api_version: admins = array.list_admins() for admin in range(0, len(admins)): admin_name = admins[admin]['name'] admin_facts[admin_name] = { 'type': admins[admin]['type'], 'role': admins[admin]['role'], } return admin_facts def generate_subnet_dict(array): sub_facts = {} subnets = array.list_subnets() for sub in range(0, len(subnets)): sub_name = subnets[sub]['name'] if subnets[sub]['enabled']: sub_facts[sub_name] = { 'gateway': subnets[sub]['gateway'], 'mtu': subnets[sub]['mtu'], 'vlan': subnets[sub]['vlan'], 'prefix': subnets[sub]['prefix'], 'interfaces': subnets[sub]['interfaces'], 'services': subnets[sub]['services'], } return sub_facts def generate_network_dict(array): net_facts = {} ports = array.list_network_interfaces() for port in range(0, len(ports)): int_name = ports[port]['name'] net_facts[int_name] = { 'hwaddr': ports[port]['hwaddr'], 'mtu': ports[port]['mtu'], 'enabled': ports[port]['enabled'], 'speed': ports[port]['speed'], 'address': ports[port]['address'], 'slaves': ports[port]['slaves'], 'services': ports[port]['services'], 'gateway': ports[port]['gateway'], 'netmask': ports[port]['netmask'], } if ports[port]['subnet']: subnets = array.get_subnet(ports[port]['subnet']) if subnets['enabled']: net_facts[int_name]['subnet'] = { 'name': subnets['name'], 'prefix': subnets['prefix'], 'vlan': subnets['vlan'], } return net_facts def generate_capacity_dict(array): capacity_facts = {} api_version = array._list_available_rest_versions() if CAP_REQUIRED_API_VERSION in api_version: volumes = array.list_volumes(pending=True) capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes) capacity = array.get(space=True) total_capacity = capacity[0]['capacity'] used_space = capacity[0]["total"] capacity_facts['free_space'] = total_capacity - used_space capacity_facts['total_capacity'] = total_capacity capacity_facts['data_reduction'] = capacity[0]['data_reduction'] capacity_facts['system_space'] = capacity[0]['system'] capacity_facts['volume_space'] = capacity[0]['volumes'] capacity_facts['shared_space'] = capacity[0]['shared_space'] capacity_facts['snapshot_space'] = capacity[0]['snapshots'] capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning'] capacity_facts['total_reduction'] = capacity[0]['total_reduction'] return capacity_facts def generate_snap_dict(array): snap_facts = {} snaps = array.list_volumes(snap=True) for snap in range(0, len(snaps)): snapshot = snaps[snap]['name'] snap_facts[snapshot] = { 'size': snaps[snap]['size'], 'source': snaps[snap]['source'], 'created': snaps[snap]['created'], } return snap_facts def generate_vol_dict(array): volume_facts = {} vols = array.list_volumes() for vol in range(0, len(vols)): volume = vols[vol]['name'] volume_facts[volume] = { 'source': vols[vol]['source'], 'size': vols[vol]['size'], 'serial': vols[vol]['serial'], 'hosts': [], 'bandwidth': "" } api_version = array._list_available_rest_versions() if AC_REQUIRED_API_VERSION in api_version: qvols = 
array.list_volumes(qos=True) for qvol in range(0, len(qvols)): volume = qvols[qvol]['name'] qos = qvols[qvol]['bandwidth_limit'] volume_facts[volume]['bandwidth'] = qos vvols = array.list_volumes(protocol_endpoint=True) for vvol in range(0, len(vvols)): volume = vvols[vvol]['name'] volume_facts[volume] = { 'source': vvols[vvol]['source'], 'serial': vvols[vvol]['serial'], 'hosts': [] } cvols = array.list_volumes(connect=True) for cvol in range(0, len(cvols)): volume = cvols[cvol]['name'] voldict = [cvols[cvol]['host'], cvols[cvol]['lun']] volume_facts[volume]['hosts'].append(voldict) return volume_facts def generate_host_dict(array): api_version = array._list_available_rest_versions() host_facts = {} hosts = array.list_hosts() for host in range(0, len(hosts)): hostname = hosts[host]['name'] tports = [] host_all_info = array.get_host(hostname, all=True) if host_all_info: tports = host_all_info[0]['target_port'] host_facts[hostname] = { 'hgroup': hosts[host]['hgroup'], 'iqn': hosts[host]['iqn'], 'wwn': hosts[host]['wwn'], 'personality': array.get_host(hostname, personality=True)['personality'], 'target_port': tports } if NVME_API_VERSION in api_version: host_facts[hostname]['nqn'] = hosts[host]['nqn'] if PREFERRED_API_VERSION in api_version: hosts = array.list_hosts(preferred_array=True) for host in range(0, len(hosts)): hostname = hosts[host]['name'] host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array'] return host_facts def generate_pgroups_dict(array): pgroups_facts = {} pgroups = array.list_pgroups() for pgroup in range(0, len(pgroups)): protgroup = pgroups[pgroup]['name'] pgroups_facts[protgroup] = { 'hgroups': pgroups[pgroup]['hgroups'], 'hosts': pgroups[pgroup]['hosts'], 'source': pgroups[pgroup]['source'], 'targets': pgroups[pgroup]['targets'], 'volumes': pgroups[pgroup]['volumes'], } prot_sched = array.get_pgroup(protgroup, schedule=True) prot_reten = array.get_pgroup(protgroup, retention=True) if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']: pgroups_facts[protgroup]['snap_freqyency'] = prot_sched['snap_frequency'] pgroups_facts[protgroup]['replicate_freqyency'] = prot_sched['replicate_frequency'] pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled'] pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled'] pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at'] pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at'] pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout'] pgroups_facts[protgroup]['per_day'] = prot_reten['per_day'] pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day'] pgroups_facts[protgroup]['target_days'] = prot_reten['target_days'] pgroups_facts[protgroup]['days'] = prot_reten['days'] pgroups_facts[protgroup]['all_for'] = prot_reten['all_for'] pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for'] if ":" in protgroup: snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True) pgroups_facts[protgroup]['snaps'] = {} for snap_transfer in range(0, len(snap_transfers)): snap = snap_transfers[snap_transfer]['name'] pgroups_facts[protgroup]['snaps'][snap] = { 'created': snap_transfers[snap_transfer]['created'], 'started': snap_transfers[snap_transfer]['started'], 'completed': snap_transfers[snap_transfer]['completed'], 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'], 'data_transferred': snap_transfers[snap_transfer]['data_transferred'], 'progress': 
snap_transfers[snap_transfer]['progress'], } return pgroups_facts def generate_pods_dict(array): pods_facts = {} api_version = array._list_available_rest_versions() if AC_REQUIRED_API_VERSION in api_version: pods = array.list_pods() for pod in range(0, len(pods)): acpod = pods[pod]['name'] pods_facts[acpod] = { 'source': pods[pod]['source'], 'arrays': pods[pod]['arrays'], } return pods_facts def generate_conn_array_dict(array): conn_array_facts = {} api_version = array._list_available_rest_versions() if CONN_STATUS_API_VERSION in api_version: carrays = array.list_connected_arrays() for carray in range(0, len(carrays)): arrayname = carrays[carray]['array_name'] conn_array_facts[arrayname] = { 'array_id': carrays[carray]['id'], 'throtled': carrays[carray]['throtled'], 'version': carrays[carray]['version'], 'type': carrays[carray]['type'], 'mgmt_ip': carrays[carray]['management_address'], 'repl_ip': carrays[carray]['replication_address'], } if CONN_STATUS_API_VERSION in api_version: conn_array_facts[arrayname]['status'] = carrays[carray]['status'] return conn_array_facts def generate_apps_dict(array): apps_facts = {} api_version = array._list_available_rest_versions() if SAN_REQUIRED_API_VERSION in api_version: apps = array.list_apps() for app in range(0, len(apps)): appname = apps[app]['name'] apps_facts[appname] = { 'version': apps[app]['version'], 'status': apps[app]['status'], 'description': apps[app]['description'], } return apps_facts def generate_vgroups_dict(array): vgroups_facts = {} api_version = array._list_available_rest_versions() if AC_REQUIRED_API_VERSION in api_version: vgroups = array.list_vgroups() for vgroup in range(0, len(vgroups)): virtgroup = vgroups[vgroup]['name'] vgroups_facts[virtgroup] = { 'volumes': vgroups[vgroup]['volumes'], } return vgroups_facts def generate_nfs_offload_dict(array): offload_facts = {} api_version = array._list_available_rest_versions() if AC_REQUIRED_API_VERSION in api_version: offload = array.list_nfs_offload() for target in range(0, len(offload)): offloadt = offload[target]['name'] offload_facts[offloadt] = { 'status': offload[target]['status'], 'mount_point': offload[target]['mount_point'], 'protocol': offload[target]['protocol'], 'mount_options': offload[target]['mount_options'], 'address': offload[target]['address'], } return offload_facts def generate_s3_offload_dict(array): offload_facts = {} api_version = array._list_available_rest_versions() if S3_REQUIRED_API_VERSION in api_version: offload = array.list_s3_offload() for target in range(0, len(offload)): offloadt = offload[target]['name'] offload_facts[offloadt] = { 'status': offload[target]['status'], 'bucket': offload[target]['bucket'], 'protocol': offload[target]['protocol'], 'access_key_id': offload[target]['access_key_id'], } return offload_facts def generate_hgroups_dict(array): hgroups_facts = {} hgroups = array.list_hgroups() for hgroup in range(0, len(hgroups)): hostgroup = hgroups[hgroup]['name'] hgroups_facts[hostgroup] = { 'hosts': hgroups[hgroup]['hosts'], 'pgs': [], 'vols': [], } pghgroups = array.list_hgroups(protect=True) for pghg in range(0, len(pghgroups)): pgname = pghgroups[pghg]['name'] hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group']) volhgroups = array.list_hgroups(connect=True) for pgvol in range(0, len(volhgroups)): pgname = volhgroups[pgvol]['name'] volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']] hgroups_facts[pgname]['vols'].append(volpgdict) return hgroups_facts def generate_interfaces_dict(array): api_version = 
array._list_available_rest_versions() int_facts = {} ports = array.list_ports() for port in range(0, len(ports)): int_name = ports[port]['name'] if ports[port]['wwn']: int_facts[int_name] = ports[port]['wwn'] if ports[port]['iqn']: int_facts[int_name] = ports[port]['iqn'] if NVME_API_VERSION in api_version: if ports[port]['nqn']: int_facts[int_name] = ports[port]['nqn'] return int_facts def main(): argument_spec = purefa_argument_spec() argument_spec.update(dict( gather_subset=dict(default='minimum', type='list',) )) module = AnsibleModule(argument_spec, supports_check_mode=False) array = get_system(module) subset = [test.lower() for test in module.params['gather_subset']] valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity', 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups', 'hosts', 'admins', 'volumes', 'snapshots', 'pods', 'vgroups', 'offload', 'apps', 'arrays') subset_test = (test in valid_subsets for test in subset) if not all(subset_test): module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s" % (",".join(valid_subsets), ",".join(subset))) facts = {} if 'minimum' in subset or 'all' in subset: facts['default'] = generate_default_dict(array) if 'performance' in subset or 'all' in subset: facts['performance'] = generate_perf_dict(array) if 'config' in subset or 'all' in subset: facts['config'] = generate_config_dict(array) if 'capacity' in subset or 'all' in subset: facts['capacity'] = generate_capacity_dict(array) if 'network' in subset or 'all' in subset: facts['network'] = generate_network_dict(array) if 'subnet' in subset or 'all' in subset: facts['subnet'] = generate_subnet_dict(array) if 'interfaces' in subset or 'all' in subset: facts['interfaces'] = generate_interfaces_dict(array) if 'hosts' in subset or 'all' in subset: facts['hosts'] = generate_host_dict(array) if 'volumes' in subset or 'all' in subset: facts['volumes'] = generate_vol_dict(array) if 'snapshots' in subset or 'all' in subset: facts['snapshots'] = generate_snap_dict(array) if 'hgroups' in subset or 'all' in subset: facts['hgroups'] = generate_hgroups_dict(array) if 'pgroups' in subset or 'all' in subset: facts['pgroups'] = generate_pgroups_dict(array) if 'pods' in subset or 'all' in subset: facts['pods'] = generate_pods_dict(array) if 'admins' in subset or 'all' in subset: facts['admins'] = generate_admin_dict(array) if 'vgroups' in subset or 'all' in subset: facts['vgroups'] = generate_vgroups_dict(array) if 'offload' in subset or 'all' in subset: facts['nfs_offload'] = generate_nfs_offload_dict(array) facts['s3_offload'] = generate_s3_offload_dict(array) if 'apps' in subset or 'all' in subset: facts['apps'] = generate_apps_dict(array) if 'arrays' in subset or 'all' in subset: facts['arrays'] = generate_conn_array_dict(array) module.exit_json(ansible_facts={'ansible_purefa_facts': facts}) if __name__ == '__main__': main()
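The gather_subset validation in main() relies on a generator expression fed to all(); below is a minimal standalone sketch of that check, with a deliberately shortened valid_subsets tuple and example input values.

# Minimal sketch of the gather_subset validation used in main(); the subset list
# and the truncated valid_subsets tuple are example values only.
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity')
subset = [test.lower() for test in ['config', 'bogus']]
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
    print("value of gather_subset must be one or more of: %s, got: %s"
          % (",".join(valid_subsets), ",".join(subset)))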
kvar/ansible
lib/ansible/modules/storage/purestorage/_purefa_facts.py
Python
gpl-3.0
32,021
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_find version_added: "2.3" short_description: Return a list of files based on specific criteria description: - Return a list of files based on specified criteria. - Multiple criteria are AND'd together. - For non-Windows targets, use the M(find) module instead. options: age: description: - Select files or folders whose age is equal to or greater than the specified time. - Use a negative age to find files equal to or less than the specified time. - You can choose seconds, minutes, hours, days or weeks by specifying the first letter of an of those words (e.g., "2s", "10d", 1w"). type: str age_stamp: description: - Choose the file property against which we compare C(age). - The default attribute we compare with is the last modification time. type: str choices: [ atime, ctime, mtime ] default: mtime checksum_algorithm: description: - Algorithm to determine the checksum of a file. - Will throw an error if the host is unable to use specified algorithm. type: str choices: [ md5, sha1, sha256, sha384, sha512 ] default: sha1 file_type: description: Type of file to search for. type: str choices: [ directory, file ] default: file follow: description: - Set this to C(yes) to follow symlinks in the path. - This needs to be used in conjunction with C(recurse). type: bool default: no get_checksum: description: - Whether to return a checksum of the file in the return info (default sha1), use C(checksum_algorithm) to change from the default. type: bool default: yes hidden: description: Set this to include hidden files or folders. type: bool default: no paths: description: - List of paths of directories to search for files or folders in. - This can be supplied as a single path or a list of paths. type: list required: yes patterns: description: - One or more (powershell or regex) patterns to compare filenames with. - The type of pattern matching is controlled by C(use_regex) option. - The patterns retrict the list of files or folders to be returned based on the filenames. - For a file to be matched it only has to match with one pattern in a list provided. type: list recurse: description: - Will recursively descend into the directory looking for files or folders. type: bool default: no size: description: - Select files or folders whose size is equal to or greater than the specified size. - Use a negative value to find files equal to or less than the specified size. - You can specify the size with a suffix of the byte type i.e. kilo = k, mega = m... - Size is not evaluated for symbolic links. type: str use_regex: description: - Will set patterns to run as a regex check if set to C(yes). 
type: bool default: no author: - Jordan Borean (@jborean93) ''' EXAMPLES = r''' - name: Find files in path win_find: paths: D:\Temp - name: Find hidden files in path win_find: paths: D:\Temp hidden: yes - name: Find files in multiple paths win_find: paths: - C:\Temp - D:\Temp - name: Find files in directory while searching recursively win_find: paths: D:\Temp recurse: yes - name: Find files in directory while following symlinks win_find: paths: D:\Temp recurse: yes follow: yes - name: Find files with .log and .out extension using powershell wildcards win_find: paths: D:\Temp patterns: [ '*.log', '*.out' ] - name: Find files in path based on regex pattern win_find: paths: D:\Temp patterns: out_\d{8}-\d{6}.log - name: Find files older than 1 day win_find: paths: D:\Temp age: 86400 - name: Find files older than 1 day based on create time win_find: paths: D:\Temp age: 86400 age_stamp: ctime - name: Find files older than 1 day with unit syntax win_find: paths: D:\Temp age: 1d - name: Find files newer than 1 hour win_find: paths: D:\Temp age: -3600 - name: Find files newer than 1 hour with unit syntax win_find: paths: D:\Temp age: -1h - name: Find files larger than 1MB win_find: paths: D:\Temp size: 1048576 - name: Find files larger than 1GB with unit syntax win_find: paths: D:\Temp size: 1g - name: Find files smaller than 1MB win_find: paths: D:\Temp size: -1048576 - name: Find files smaller than 1GB with unit syntax win_find: paths: D:\Temp size: -1g - name: Find folders/symlinks in multiple paths win_find: paths: - C:\Temp - D:\Temp file_type: directory - name: Find files and return SHA256 checksum of files found win_find: paths: C:\Temp get_checksum: yes checksum_algorithm: sha256 - name: Find files and do not return the checksum win_find: paths: C:\Temp get_checksum: no ''' RETURN = r''' examined: description: The number of files/folders that was checked. returned: always type: int sample: 10 matched: description: The number of files/folders that match the criteria. returned: always type: int sample: 2 files: description: Information on the files/folders that match the criteria returned as a list of dictionary elements for each file matched. returned: success type: complex contains: attributes: description: attributes of the file at path in raw form. returned: success, path exists type: str sample: "Archive, Hidden" checksum: description: The checksum of a file based on checksum_algorithm specified. returned: success, path exists, path is a file, get_checksum == True type: str sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98 creationtime: description: The create time of the file represented in seconds since epoch. returned: success, path exists type: float sample: 1477984205.15 extension: description: The extension of the file at path. returned: success, path exists, path is a file type: str sample: ".ps1" isarchive: description: If the path is ready for archiving or not. returned: success, path exists type: bool sample: true isdir: description: If the path is a directory or not. returned: success, path exists type: bool sample: true ishidden: description: If the path is hidden or not. returned: success, path exists type: bool sample: true islnk: description: If the path is a symbolic link or junction or not. returned: success, path exists type: bool sample: true isreadonly: description: If the path is read only or not. returned: success, path exists type: bool sample: true isshared: description: If the path is shared or not. 
            returned: success, path exists
            type: bool
            sample: true
        lastaccesstime:
            description: The last access time of the file represented in seconds since epoch.
            returned: success, path exists
            type: float
            sample: 1477984205.15
        lastwritetime:
            description: The last modification time of the file represented in seconds since epoch.
            returned: success, path exists
            type: float
            sample: 1477984205.15
        lnk_source:
            description: The target of the symbolic link, will return null if not a link or the link is broken.
            returned: success, path exists, path is a symbolic link
            type: str
            sample: C:\temp
        owner:
            description: The owner of the file.
            returned: success, path exists
            type: str
            sample: BUILTIN\Administrators
        path:
            description: The full absolute path to the file.
            returned: success, path exists
            type: str
            sample: C:\Temp\test.ps1
        sharename:
            description: The name of the share if the folder is shared.
            returned: success, path exists, path is a directory and isshared == True
            type: str
            sample: file-share
        size:
            description: The size in bytes of a file or folder.
            returned: success, path exists, path is not a link
            type: int
            sample: 1024
'''
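The age and size options documented above accept suffixed values such as "-1h" or "1g". The actual conversion happens in win_find.ps1, which is not shown here; the following is only a rough Python sketch of the documented semantics, with hypothetical helper and constant names.

# Rough sketch only: win_find's real parsing lives in PowerShell. Unit letters
# follow the option documentation above; parse_suffixed and the *_UNITS tables
# are hypothetical names, not part of the module.
def parse_suffixed(value, units):
    value = str(value).strip()
    sign = -1 if value.startswith('-') else 1
    value = value.lstrip('+-')
    if value and value[-1].lower() in units:
        return sign * int(value[:-1]) * units[value[-1].lower()]
    return sign * int(value)

AGE_UNITS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400, 'w': 604800}
SIZE_UNITS = {'b': 1, 'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3, 't': 1024 ** 4}

# parse_suffixed("-1h", AGE_UNITS)  == -3600
# parse_suffixed("1g", SIZE_UNITS)  == 1073741824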
Jorge-Rodriguez/ansible
lib/ansible/modules/windows/win_find.py
Python
gpl-3.0
9,583
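For reference, a minimal sketch of how the age/size unit syntax documented above ("1d", "-1h", "1g") could be converted to plain seconds or bytes. The helper name and the unit tables are illustrative assumptions, not the module's actual implementation.

def parse_unit_value(value, units):
    # Accepts either a bare integer ("86400", "-3600") or an integer with a
    # single trailing unit character ("1d", "-1h", "1g").
    value = str(value).strip()
    sign = -1 if value.startswith('-') else 1
    value = value.lstrip('+-')
    if value and value[-1].lower() in units:
        return sign * int(value[:-1]) * units[value[-1].lower()]
    return sign * int(value)

AGE_UNITS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400, 'w': 604800}
SIZE_UNITS = {'b': 1, 'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3, 't': 1024 ** 4}

assert parse_unit_value('1d', AGE_UNITS) == 86400
assert parse_unit_value('-1h', AGE_UNITS) == -3600
assert parse_unit_value('1g', SIZE_UNITS) == 1024 ** 3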
# Copyright (C) 2009 Kevin Ollivier All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Helper functions for the WebKit build. import commands import glob import os import platform import re import shutil import sys import urllib import urlparse def get_output(command): """ Windows-compatible function for getting output from a command. """ if sys.platform.startswith('win'): f = os.popen(command) return f.read().strip() else: return commands.getoutput(command) def get_excludes(root, patterns): """ Get a list of exclude patterns going down several dirs. TODO: Make this fully recursive. """ excludes = [] for pattern in patterns: subdir_pattern = os.sep + '*' for subdir in [subdir_pattern, subdir_pattern*2, subdir_pattern*3]: adir = root + subdir + os.sep + pattern files = glob.glob(adir) for afile in files: excludes.append(os.path.basename(afile)) return excludes def get_dirs_for_features(root, features, dirs): """ Find which directories to include in the list of build dirs based upon the enabled port(s) and features. """ outdirs = dirs for adir in dirs: for feature in features: relpath = os.path.join(adir, feature) featuredir = os.path.join(root, relpath) if os.path.exists(featuredir) and not relpath in outdirs: outdirs.append(relpath) return outdirs def download_if_newer(url, destdir): """ Checks if the file on the server is newer than the one in the user's tree, and if so, downloads it. Returns the filename of the downloaded file if downloaded, or None if the existing file matches the one on the server. """ obj = urlparse.urlparse(url) filename = os.path.basename(obj.path) destfile = os.path.join(destdir, filename) urlobj = urllib.urlopen(url) size = long(urlobj.info().getheader('Content-Length')) def download_callback(downloaded, block_size, total_size): downloaded = block_size * downloaded if downloaded > total_size: downloaded = total_size sys.stdout.write('%s %d of %d bytes downloaded\r' % (filename, downloaded, total_size)) # NB: We don't check modified time as Python doesn't yet handle timezone conversion # properly when converting strings to time objects. 
if not os.path.exists(destfile) or os.path.getsize(destfile) != size: urllib.urlretrieve(url, destfile, download_callback) print '' return destfile return None def update_wx_deps(conf, wk_root, msvc_version='msvc2008'): """ Download and update tools needed to build the wx port. """ import Logs Logs.info('Ensuring wxWebKit dependencies are up-to-date.') wklibs_dir = os.path.join(wk_root, 'WebKitLibraries') waf = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/waf', os.path.join(wk_root, 'Tools', 'wx')) if waf: # TODO: Make the build restart itself after an update. Logs.warn('Build system updated, please restart build.') sys.exit(1) # since this module is still experimental wxpy_dir = os.path.join(wk_root, 'Source', 'WebKit', 'wx', 'bindings', 'python') swig_module = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/swig.py.txt', wxpy_dir) if swig_module: shutil.copy(os.path.join(wxpy_dir, 'swig.py.txt'), os.path.join(wxpy_dir, 'swig.py')) if sys.platform.startswith('win'): Logs.info('downloading deps package') archive = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/wxWebKitDeps-%s.zip' % msvc_version, wklibs_dir) if archive and os.path.exists(archive): os.system('unzip -o %s -d %s' % (archive, os.path.join(wklibs_dir, msvc_version))) elif sys.platform.startswith('darwin'): # export the right compiler for building the dependencies if platform.release().startswith('10'): # Snow Leopard os.environ['CC'] = conf.env['CC'][0] os.environ['CXX'] = conf.env['CXX'][0] os.system('%s/Tools/wx/install-unix-extras' % wk_root) def includeDirsForSources(sources): include_dirs = [] for group in sources: for source in group: dirname = os.path.dirname(source) if not dirname in include_dirs: include_dirs.append(dirname) return include_dirs def flattenSources(sources): flat_sources = [] for group in sources: flat_sources.extend(group) return flat_sources def git_branch_name(): try: branches = commands.getoutput("git branch --no-color") match = re.search('^\* (.*)', branches, re.MULTILINE) if match: return ".%s" % match.group(1) except: pass return "" def get_config(wk_root): config_file = os.path.join(wk_root, 'WebKitBuild', 'Configuration') config = 'Debug' if os.path.exists(config_file): config = open(config_file).read() return config def svn_revision(): if os.system("git-svn info") == 0: info = commands.getoutput("git-svn info ../..") else: info = commands.getoutput("svn info") for line in info.split("\n"): if line.startswith("Revision: "): return line.replace("Revision: ", "").strip() return ""
danialbehzadi/Nokia-RM-1013-2.0.0.11
webkit/Tools/wx/build/build_utils.py
Python
gpl-3.0
6,778
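A small, self-contained usage sketch of the get_dirs_for_features() helper above. It builds a throwaway directory tree so it runs anywhere; the port and feature names are made up, and the function is re-stated here only so the example is runnable on its own.

import os
import tempfile

def get_dirs_for_features(root, features, dirs):
    # Re-stated from build_utils above: append feature subdirectories that
    # actually exist under the given root.
    outdirs = dirs
    for adir in dirs:
        for feature in features:
            relpath = os.path.join(adir, feature)
            if os.path.exists(os.path.join(root, relpath)) and relpath not in outdirs:
                outdirs.append(relpath)
    return outdirs

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'Source', 'wx', 'svg'))  # hypothetical feature dir
print(get_dirs_for_features(root, ['svg'], [os.path.join('Source', 'wx')]))
# ['Source/wx', 'Source/wx/svg'] (path separator depends on the platform)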
"""Tests for the Plaato integration."""
lukas-hetzenecker/home-assistant
tests/components/plaato/__init__.py
Python
apache-2.0
40
# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Bare-Metal DB testcase for BareMetalNode """ from nova import exception from nova.tests.virt.baremetal.db import base from nova.tests.virt.baremetal.db import utils from nova.virt.baremetal import db class BareMetalNodesTestCase(base.BMDBTestCase): def _create_nodes(self): nodes = [ utils.new_bm_node(pm_address='0', service_host="host1", memory_mb=100000, cpus=100, local_gb=10000), utils.new_bm_node(pm_address='1', service_host="host2", instance_uuid='A', memory_mb=100000, cpus=100, local_gb=10000), utils.new_bm_node(pm_address='2', service_host="host2", memory_mb=1000, cpus=1, local_gb=1000), utils.new_bm_node(pm_address='3', service_host="host2", memory_mb=1000, cpus=2, local_gb=1000), utils.new_bm_node(pm_address='4', service_host="host2", memory_mb=2000, cpus=1, local_gb=1000), utils.new_bm_node(pm_address='5', service_host="host2", memory_mb=2000, cpus=2, local_gb=1000), ] self.ids = [] for n in nodes: ref = db.bm_node_create(self.context, n) self.ids.append(ref['id']) def test_get_all(self): r = db.bm_node_get_all(self.context) self.assertEquals(r, []) self._create_nodes() r = db.bm_node_get_all(self.context) self.assertEquals(len(r), 6) def test_get(self): self._create_nodes() r = db.bm_node_get(self.context, self.ids[0]) self.assertEquals(r['pm_address'], '0') r = db.bm_node_get(self.context, self.ids[1]) self.assertEquals(r['pm_address'], '1') self.assertRaises( exception.NodeNotFound, db.bm_node_get, self.context, -1) def test_get_by_service_host(self): self._create_nodes() r = db.bm_node_get_all(self.context, service_host=None) self.assertEquals(len(r), 6) r = db.bm_node_get_all(self.context, service_host="host1") self.assertEquals(len(r), 1) self.assertEquals(r[0]['pm_address'], '0') r = db.bm_node_get_all(self.context, service_host="host2") self.assertEquals(len(r), 5) pmaddrs = [x['pm_address'] for x in r] self.assertIn('1', pmaddrs) self.assertIn('2', pmaddrs) self.assertIn('3', pmaddrs) self.assertIn('4', pmaddrs) self.assertIn('5', pmaddrs) r = db.bm_node_get_all(self.context, service_host="host3") self.assertEquals(r, []) def test_get_associated(self): self._create_nodes() r = db.bm_node_get_associated(self.context, service_host=None) self.assertEquals(len(r), 1) self.assertEquals(r[0]['pm_address'], '1') r = db.bm_node_get_unassociated(self.context, service_host=None) self.assertEquals(len(r), 5) pmaddrs = [x['pm_address'] for x in r] self.assertIn('0', pmaddrs) self.assertIn('2', pmaddrs) self.assertIn('3', pmaddrs) self.assertIn('4', pmaddrs) self.assertIn('5', pmaddrs) def test_destroy(self): self._create_nodes() db.bm_node_destroy(self.context, self.ids[0]) self.assertRaises( exception.NodeNotFound, db.bm_node_get, self.context, self.ids[0]) r = db.bm_node_get_all(self.context) self.assertEquals(len(r), 5) def test_destroy_with_interfaces(self): self._create_nodes() if_a_id = db.bm_interface_create(self.context, self.ids[0], 'aa:aa:aa:aa:aa:aa', None, None) if_b_id = 
db.bm_interface_create(self.context, self.ids[0], 'bb:bb:bb:bb:bb:bb', None, None) if_x_id = db.bm_interface_create(self.context, self.ids[1], '11:22:33:44:55:66', None, None) db.bm_node_destroy(self.context, self.ids[0]) self.assertRaises( exception.NovaException, db.bm_interface_get, self.context, if_a_id) self.assertRaises( exception.NovaException, db.bm_interface_get, self.context, if_b_id) # Another node's interface is not affected if_x = db.bm_interface_get(self.context, if_x_id) self.assertEqual(self.ids[1], if_x['bm_node_id']) self.assertRaises( exception.NodeNotFound, db.bm_node_get, self.context, self.ids[0]) r = db.bm_node_get_all(self.context) self.assertEquals(len(r), 5) def test_find_free(self): self._create_nodes() fn = db.bm_node_find_free(self.context, 'host2') self.assertEqual(fn['pm_address'], '2') fn = db.bm_node_find_free(self.context, 'host2', memory_mb=500, cpus=2, local_gb=100) self.assertEqual(fn['pm_address'], '3') fn = db.bm_node_find_free(self.context, 'host2', memory_mb=1001, cpus=1, local_gb=1000) self.assertEqual(fn['pm_address'], '4') fn = db.bm_node_find_free(self.context, 'host2', memory_mb=2000, cpus=1, local_gb=1000) self.assertEqual(fn['pm_address'], '4') fn = db.bm_node_find_free(self.context, 'host2', memory_mb=2000, cpus=2, local_gb=1000) self.assertEqual(fn['pm_address'], '5') # check memory_mb fn = db.bm_node_find_free(self.context, 'host2', memory_mb=2001, cpus=2, local_gb=1000) self.assertTrue(fn is None) # check cpus fn = db.bm_node_find_free(self.context, 'host2', memory_mb=2000, cpus=3, local_gb=1000) self.assertTrue(fn is None) # check local_gb fn = db.bm_node_find_free(self.context, 'host2', memory_mb=2000, cpus=2, local_gb=1001) self.assertTrue(fn is None)
Brocade-OpenSource/OpenStack-DNRM-Nova
nova/tests/virt/baremetal/db/test_bm_node.py
Python
apache-2.0
6,918
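A standalone sketch of the "find free node" selection that the tests above exercise: pick the smallest unassociated node on a host that still satisfies the requested memory/cpus/disk. This is an assumption about the ordering used by db.bm_node_find_free, not the real DB query.

def find_free(nodes, host, memory_mb=0, cpus=0, local_gb=0):
    # Keep only free nodes on the requested host that meet the minimums.
    candidates = [n for n in nodes
                  if n['service_host'] == host
                  and n.get('instance_uuid') is None
                  and n['memory_mb'] >= memory_mb
                  and n['cpus'] >= cpus
                  and n['local_gb'] >= local_gb]
    if not candidates:
        return None
    # Best fit: smallest node that still satisfies the request.
    return min(candidates, key=lambda n: (n['memory_mb'], n['cpus'], n['local_gb']))

nodes = [
    {'pm_address': '2', 'service_host': 'host2', 'instance_uuid': None,
     'memory_mb': 1000, 'cpus': 1, 'local_gb': 1000},
    {'pm_address': '5', 'service_host': 'host2', 'instance_uuid': None,
     'memory_mb': 2000, 'cpus': 2, 'local_gb': 1000},
]
assert find_free(nodes, 'host2')['pm_address'] == '2'
assert find_free(nodes, 'host2', memory_mb=2000, cpus=2)['pm_address'] == '5'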
import numpy as np from scipy import linalg from sklearn.decomposition import nmf from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import raises from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less random_state = np.random.mtrand.RandomState(0) @raises(ValueError) def test_initialize_nn_input(): # Test NNDSVD behaviour on negative input nmf._initialize_nmf(-np.ones((2, 2)), 2) def test_initialize_nn_output(): # Test that NNDSVD does not return negative values data = np.abs(random_state.randn(10, 10)) for var in (None, 'a', 'ar'): W, H = nmf._initialize_nmf(data, 10, random_state=0) assert_false((W < 0).any() or (H < 0).any()) def test_initialize_close(): # Test NNDSVD error # Test that _initialize_nmf error is less than the standard deviation of # the entries in the matrix. A = np.abs(random_state.randn(10, 10)) W, H = nmf._initialize_nmf(A, 10) error = linalg.norm(np.dot(W, H) - A) sdev = linalg.norm(A - A.mean()) assert_true(error <= sdev) def test_initialize_variants(): # Test NNDSVD variants correctness # Test that the variants 'a' and 'ar' differ from basic NNDSVD only where # the basic version has zeros. data = np.abs(random_state.randn(10, 10)) W0, H0 = nmf._initialize_nmf(data, 10, variant=None) Wa, Ha = nmf._initialize_nmf(data, 10, variant='a') War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0) for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): assert_true(np.allclose(evl[ref != 0], ref[ref != 0])) @raises(ValueError) def test_projgrad_nmf_fit_nn_input(): # Test model fit behaviour on negative input A = -np.ones((2, 2)) m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0) m.fit(A) def test_projgrad_nmf_fit_nn_output(): # Test that the decomposition does not contain negative values A = np.c_[5 * np.ones(5) - np.arange(1, 6), 5 * np.ones(5) + np.arange(1, 6)] for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'): model = nmf.ProjectedGradientNMF(n_components=2, init=init, random_state=0) transf = model.fit_transform(A) assert_false((model.components_ < 0).any() or (transf < 0).any()) def test_projgrad_nmf_fit_close(): # Test that the fit is not too far away pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0) X = np.abs(random_state.randn(6, 5)) assert_less(pnmf.fit(X).reconstruction_err_, 0.05) def test_nls_nn_output(): # Test that NLS solver doesn't return negative values A = np.arange(1, 5).reshape(1, -1) Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100) assert_false((Ap < 0).any()) def test_nls_close(): # Test that the NLS results should be close A = np.arange(1, 5).reshape(1, -1) Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A), 0.001, 100) assert_true((np.abs(Ap - A) < 0.01).all()) def test_projgrad_nmf_transform(): # Test that NMF.transform returns close values # (transform uses scipy.optimize.nnls for now) A = np.abs(random_state.randn(6, 5)) m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0) transf = m.fit_transform(A) assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0)) def test_n_components_greater_n_features(): # Smoke test for the case of more components than features. 
A = np.abs(random_state.randn(30, 10)) nmf.ProjectedGradientNMF(n_components=15, sparseness='data', random_state=0).fit(A) def test_projgrad_nmf_sparseness(): # Test sparseness # Test that sparsity constraints actually increase sparseness in the # part where they are applied. A = np.abs(random_state.randn(10, 10)) m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A) data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data', random_state=0).fit(A).data_sparseness_ comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components', random_state=0).fit(A).comp_sparseness_ assert_greater(data_sp, m.data_sparseness_) assert_greater(comp_sp, m.comp_sparseness_) def test_sparse_input(): # Test that sparse matrices are accepted as input from scipy.sparse import csc_matrix A = np.abs(random_state.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 T1 = nmf.ProjectedGradientNMF(n_components=5, init='random', random_state=999).fit_transform(A) A_sparse = csc_matrix(A) pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random', random_state=999) T2 = pg_nmf.fit_transform(A_sparse) assert_array_almost_equal(pg_nmf.reconstruction_err_, linalg.norm(A - np.dot(T2, pg_nmf.components_), 'fro')) assert_array_almost_equal(T1, T2) # same with sparseness T2 = nmf.ProjectedGradientNMF( n_components=5, init='random', sparseness='data', random_state=999).fit_transform(A_sparse) T1 = nmf.ProjectedGradientNMF( n_components=5, init='random', sparseness='data', random_state=999).fit_transform(A) def test_sparse_transform(): # Test that transform works on sparse data. Issue #2124 from scipy.sparse import csc_matrix A = np.abs(random_state.randn(5, 4)) A[A > 1.0] = 0 A = csc_matrix(A) model = nmf.NMF() A_fit_tr = model.fit_transform(A) A_tr = model.transform(A) # This solver seems pretty inconsistent assert_array_almost_equal(A_fit_tr, A_tr, decimal=2) if __name__ == '__main__': import nose nose.run(argv=['', __file__])
ningchi/scikit-learn
sklearn/decomposition/tests/test_nmf.py
Python
bsd-3-clause
6,123
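A minimal usage sketch of the estimator these tests exercise. Note that ProjectedGradientNMF and the nmf module layout belong to the older scikit-learn API used in this file; the data shape and parameters below are arbitrary.

import numpy as np
from sklearn.decomposition import nmf

rng = np.random.RandomState(0)
X = np.abs(rng.randn(6, 5))                       # non-negative input data
model = nmf.ProjectedGradientNMF(n_components=2, init='nndsvda', random_state=0)
W = model.fit_transform(X)                        # (6, 2), non-negative
H = model.components_                             # (2, 5), non-negative
print(model.reconstruction_err_, np.linalg.norm(X - W.dot(H)))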
""" test pretrained models """ from __future__ import print_function import mxnet as mx from common import find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78] for (m, g) in zip(models, accs): acc = mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed)) assert r > g and r < g + .1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn' g = 0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed)) assert r > g and r < g + .1 if __name__ == '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size = 16 * len(gpus) gpus = ','.join([str(i) for i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500} download_data() test_imagenet1k_resnet(**kwargs) test_imagenet1k_inception_bn(**kwargs)
hotpxl/mxnet
example/image-classification/test_score.py
Python
apache-2.0
1,493
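For clarity, the kwargs assembled in the __main__ block above reduce to the following; the GPU ids here are hypothetical.

gpus = [0, 1]                                     # e.g. from mx.test_utils.list_gpus()
batch_size = 16 * len(gpus)
kwargs = {'gpus': ','.join(str(i) for i in gpus),
          'batch_size': batch_size,
          'max_num_examples': 500}
print(kwargs)   # {'gpus': '0,1', 'batch_size': 32, 'max_num_examples': 500}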
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsx_gw_devices Revision ID: 19180cf98af6 Revises: 117643811bca Create Date: 2014-02-26 02:46:26.151741 """ # revision identifiers, used by Alembic. revision = '19180cf98af6' down_revision = '117643811bca' from alembic import op import sqlalchemy as sa from neutron.db import migration def upgrade(): if not migration.schema_has_table('networkgatewaydevices'): # Assume that, in the database we are migrating from, the # configured plugin did not create any nsx tables. return op.create_table( 'networkgatewaydevicereferences', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_gateway_id', sa.String(length=36), nullable=True), sa.Column('interface_name', sa.String(length=64), nullable=True), sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name')) # Copy data from networkgatewaydevices into networkgatewaydevicereference op.execute("INSERT INTO networkgatewaydevicereferences SELECT " "id, network_gateway_id, interface_name FROM " "networkgatewaydevices") # drop networkgatewaydevices op.drop_table('networkgatewaydevices') op.create_table( 'networkgatewaydevices', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('connector_type', sa.String(length=10), nullable=True), sa.Column('connector_ip', sa.String(length=64), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.PrimaryKeyConstraint('id')) # Create a networkgatewaydevice for each existing reference. # For existing references nsx_id == neutron_id # Do not fill conenctor info as they would be unknown op.execute("INSERT INTO networkgatewaydevices (id, nsx_id, tenant_id) " "SELECT gw_dev_ref.id, gw_dev_ref.id as nsx_id, tenant_id " "FROM networkgatewaydevicereferences AS gw_dev_ref " "INNER JOIN networkgateways AS net_gw ON " "gw_dev_ref.network_gateway_id=net_gw.id")
yuewko/neutron
neutron/db/migration/alembic_migrations/versions/19180cf98af6_nsx_gw_devices.py
Python
apache-2.0
3,023
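The migration above follows a common data-preserving rebuild pattern: create a reference table, copy rows with INSERT ... SELECT, drop the old table, then recreate it with the new schema and repopulate it. A generic sketch of that pattern follows; the table and column names are placeholders, not the neutron schema.

from alembic import op
import sqlalchemy as sa

def upgrade():
    # 1. create the table that will hold the copied rows
    op.create_table(
        'widget_references',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('parent_id', sa.String(length=36), nullable=True))
    # 2. preserve existing data
    op.execute("INSERT INTO widget_references SELECT id, parent_id FROM widgets")
    # 3. drop and recreate the original table with its new schema
    op.drop_table('widgets')
    op.create_table(
        'widgets',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('name', sa.String(length=255), nullable=True))
    # 4. repopulate from the copied rows
    op.execute("INSERT INTO widgets (id) SELECT id FROM widget_references")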
from coalib.bearlib.abstractions.Linter import linter from dependency_management.requirements.DistributionRequirement import ( DistributionRequirement) @linter(executable='chktex', output_format='regex', output_regex=r'(?P<severity>Error|Warning) \d+ in .+ line ' r'(?P<line>\d+): (?P<message>.*)') class LatexLintBear: """ Checks the code with ``chktex``. """ LANGUAGES = {'Tex'} REQUIREMENTS = {DistributionRequirement('chktex', zypper='texlive-chktex')} AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'[email protected]'} LICENSE = 'AGPL-3.0' CAN_DETECT = {'Syntax', 'Formatting'} @staticmethod def create_arguments(filename, file, config_file): return ( '--format', '%k %n in {0} line %l: %m!n'.format(filename), filename, )
IPMITMO/statan
coala-bears/bears/latex/LatexLintBear.py
Python
mit
886
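A quick illustration that the output_regex above extracts severity, line number and message from a typical chktex diagnostic; the sample line itself is made up for demonstration.

import re

output_regex = (r'(?P<severity>Error|Warning) \d+ in .+ line '
                r'(?P<line>\d+): (?P<message>.*)')
sample = "Warning 13 in paper.tex line 42: Intersentence spacing (`\\@') should perhaps be used."
m = re.search(output_regex, sample)
print(m.group('severity'), m.group('line'), m.group('message'))
# Warning 42 Intersentence spacing (`\@') should perhaps be used.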
from __future__ import unicode_literals, division, absolute_import from .schedule import *
ratoaq2/Flexget
flexget/ui/plugins/schedule/__init__.py
Python
mit
91
#!/usr/bin/python # # Example nfcpy to wpa_supplicant wrapper for P2P NFC operations # Copyright (c) 2012-2013, Jouni Malinen <[email protected]> # # This software may be distributed under the terms of the BSD license. # See README for more details. import os import sys import time import random import threading import argparse import nfc import nfc.ndef import nfc.llcp import nfc.handover import logging import wpaspy wpas_ctrl = '/var/run/wpa_supplicant' ifname = None init_on_touch = False in_raw_mode = False prev_tcgetattr = 0 include_wps_req = True include_p2p_req = True no_input = False srv = None continue_loop = True terminate_now = False summary_file = None success_file = None def summary(txt): print txt if summary_file: with open(summary_file, 'a') as f: f.write(txt + "\n") def success_report(txt): summary(txt) if success_file: with open(success_file, 'a') as f: f.write(txt + "\n") def wpas_connect(): ifaces = [] if os.path.isdir(wpas_ctrl): try: ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)] except OSError, error: print "Could not find wpa_supplicant: ", error return None if len(ifaces) < 1: print "No wpa_supplicant control interface found" return None for ctrl in ifaces: if ifname: if ifname not in ctrl: continue try: print "Trying to use control interface " + ctrl wpas = wpaspy.Ctrl(ctrl) return wpas except Exception, e: pass return None def wpas_tag_read(message): wpas = wpas_connect() if (wpas == None): return False cmd = "WPS_NFC_TAG_READ " + str(message).encode("hex") global force_freq if force_freq: cmd = cmd + " freq=" + force_freq if "FAIL" in wpas.request(cmd): return False return True def wpas_get_handover_req(): wpas = wpas_connect() if (wpas == None): return None res = wpas.request("NFC_GET_HANDOVER_REQ NDEF P2P-CR").rstrip() if "FAIL" in res: return None return res.decode("hex") def wpas_get_handover_req_wps(): wpas = wpas_connect() if (wpas == None): return None res = wpas.request("NFC_GET_HANDOVER_REQ NDEF WPS-CR").rstrip() if "FAIL" in res: return None return res.decode("hex") def wpas_get_handover_sel(tag=False): wpas = wpas_connect() if (wpas == None): return None if tag: res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip() else: res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR").rstrip() if "FAIL" in res: return None return res.decode("hex") def wpas_get_handover_sel_wps(): wpas = wpas_connect() if (wpas == None): return None res = wpas.request("NFC_GET_HANDOVER_SEL NDEF WPS-CR"); if "FAIL" in res: return None return res.rstrip().decode("hex") def wpas_report_handover(req, sel, type): wpas = wpas_connect() if (wpas == None): return None cmd = "NFC_REPORT_HANDOVER " + type + " P2P " + str(req).encode("hex") + " " + str(sel).encode("hex") global force_freq if force_freq: cmd = cmd + " freq=" + force_freq return wpas.request(cmd) def wpas_report_handover_wsc(req, sel, type): wpas = wpas_connect() if (wpas == None): return None cmd = "NFC_REPORT_HANDOVER " + type + " WPS " + str(req).encode("hex") + " " + str(sel).encode("hex") if force_freq: cmd = cmd + " freq=" + force_freq return wpas.request(cmd) def p2p_handover_client(llc): message = nfc.ndef.HandoverRequestMessage(version="1.2") message.nonce = random.randint(0, 0xffff) global include_p2p_req if include_p2p_req: data = wpas_get_handover_req() if (data == None): summary("Could not get handover request carrier record from wpa_supplicant") return print "Handover request carrier record from wpa_supplicant: " + data.encode("hex") datamsg = nfc.ndef.Message(data) 
message.add_carrier(datamsg[0], "active", datamsg[1:]) global include_wps_req if include_wps_req: print "Handover request (pre-WPS):" try: print message.pretty() except Exception, e: print e data = wpas_get_handover_req_wps() if data: print "Add WPS request in addition to P2P" datamsg = nfc.ndef.Message(data) message.add_carrier(datamsg[0], "active", datamsg[1:]) print "Handover request:" try: print message.pretty() except Exception, e: print e print str(message).encode("hex") client = nfc.handover.HandoverClient(llc) try: summary("Trying to initiate NFC connection handover") client.connect() summary("Connected for handover") except nfc.llcp.ConnectRefused: summary("Handover connection refused") client.close() return except Exception, e: summary("Other exception: " + str(e)) client.close() return summary("Sending handover request") if not client.send(message): summary("Failed to send handover request") client.close() return summary("Receiving handover response") message = client._recv() if message is None: summary("No response received") client.close() return if message.type != "urn:nfc:wkt:Hs": summary("Response was not Hs - received: " + message.type) client.close() return print "Received message" try: print message.pretty() except Exception, e: print e print str(message).encode("hex") message = nfc.ndef.HandoverSelectMessage(message) summary("Handover select received") try: print message.pretty() except Exception, e: print e for carrier in message.carriers: print "Remote carrier type: " + carrier.type if carrier.type == "application/vnd.wfa.p2p": print "P2P carrier type match - send to wpa_supplicant" if "OK" in wpas_report_handover(data, carrier.record, "INIT"): success_report("P2P handover reported successfully (initiator)") else: summary("P2P handover report rejected") break print "Remove peer" client.close() print "Done with handover" global only_one if only_one: print "only_one -> stop loop" global continue_loop continue_loop = False global no_wait if no_wait: print "Trying to exit.." 
global terminate_now terminate_now = True class HandoverServer(nfc.handover.HandoverServer): def __init__(self, llc): super(HandoverServer, self).__init__(llc) self.sent_carrier = None self.ho_server_processing = False self.success = False # override to avoid parser error in request/response.pretty() in nfcpy # due to new WSC handover format def _process_request(self, request): summary("received handover request {}".format(request.type)) response = nfc.ndef.Message("\xd1\x02\x01Hs\x12") if not request.type == 'urn:nfc:wkt:Hr': summary("not a handover request") else: try: request = nfc.ndef.HandoverRequestMessage(request) except nfc.ndef.DecodeError as e: summary("error decoding 'Hr' message: {}".format(e)) else: response = self.process_request(request) summary("send handover response {}".format(response.type)) return response def process_request(self, request): self.ho_server_processing = True clear_raw_mode() print "HandoverServer - request received" try: print "Parsed handover request: " + request.pretty() except Exception, e: print e sel = nfc.ndef.HandoverSelectMessage(version="1.2") found = False for carrier in request.carriers: print "Remote carrier type: " + carrier.type if carrier.type == "application/vnd.wfa.p2p": print "P2P carrier type match - add P2P carrier record" found = True self.received_carrier = carrier.record print "Carrier record:" try: print carrier.record.pretty() except Exception, e: print e data = wpas_get_handover_sel() if data is None: print "Could not get handover select carrier record from wpa_supplicant" continue print "Handover select carrier record from wpa_supplicant:" print data.encode("hex") self.sent_carrier = data if "OK" in wpas_report_handover(self.received_carrier, self.sent_carrier, "RESP"): success_report("P2P handover reported successfully (responder)") else: summary("P2P handover report rejected") break message = nfc.ndef.Message(data); sel.add_carrier(message[0], "active", message[1:]) break for carrier in request.carriers: if found: break print "Remote carrier type: " + carrier.type if carrier.type == "application/vnd.wfa.wsc": print "WSC carrier type match - add WSC carrier record" found = True self.received_carrier = carrier.record print "Carrier record:" try: print carrier.record.pretty() except Exception, e: print e data = wpas_get_handover_sel_wps() if data is None: print "Could not get handover select carrier record from wpa_supplicant" continue print "Handover select carrier record from wpa_supplicant:" print data.encode("hex") self.sent_carrier = data if "OK" in wpas_report_handover_wsc(self.received_carrier, self.sent_carrier, "RESP"): success_report("WSC handover reported successfully") else: summary("WSC handover report rejected") break message = nfc.ndef.Message(data); sel.add_carrier(message[0], "active", message[1:]) found = True break print "Handover select:" try: print sel.pretty() except Exception, e: print e print str(sel).encode("hex") summary("Sending handover select") self.success = True return sel def clear_raw_mode(): import sys, tty, termios global prev_tcgetattr, in_raw_mode if not in_raw_mode: return fd = sys.stdin.fileno() termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr) in_raw_mode = False def getch(): import sys, tty, termios, select global prev_tcgetattr, in_raw_mode fd = sys.stdin.fileno() prev_tcgetattr = termios.tcgetattr(fd) ch = None try: tty.setraw(fd) in_raw_mode = True [i, o, e] = select.select([fd], [], [], 0.05) if i: ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, 
prev_tcgetattr) in_raw_mode = False return ch def p2p_tag_read(tag): success = False if len(tag.ndef.message): for record in tag.ndef.message: print "record type " + record.type if record.type == "application/vnd.wfa.wsc": summary("WPS tag - send to wpa_supplicant") success = wpas_tag_read(tag.ndef.message) break if record.type == "application/vnd.wfa.p2p": summary("P2P tag - send to wpa_supplicant") success = wpas_tag_read(tag.ndef.message) break else: summary("Empty tag") if success: success_report("Tag read succeeded") return success def rdwr_connected_p2p_write(tag): summary("Tag found - writing - " + str(tag)) global p2p_sel_data tag.ndef.message = str(p2p_sel_data) success_report("Tag write succeeded") print "Done - remove tag" global only_one if only_one: global continue_loop continue_loop = False global p2p_sel_wait_remove return p2p_sel_wait_remove def wps_write_p2p_handover_sel(clf, wait_remove=True): print "Write P2P handover select" data = wpas_get_handover_sel(tag=True) if (data == None): summary("Could not get P2P handover select from wpa_supplicant") return global p2p_sel_wait_remove p2p_sel_wait_remove = wait_remove global p2p_sel_data p2p_sel_data = nfc.ndef.HandoverSelectMessage(version="1.2") message = nfc.ndef.Message(data); p2p_sel_data.add_carrier(message[0], "active", message[1:]) print "Handover select:" try: print p2p_sel_data.pretty() except Exception, e: print e print str(p2p_sel_data).encode("hex") print "Touch an NFC tag" clf.connect(rdwr={'on-connect': rdwr_connected_p2p_write}) def rdwr_connected(tag): global only_one, no_wait summary("Tag connected: " + str(tag)) if tag.ndef: print "NDEF tag: " + tag.type try: print tag.ndef.message.pretty() except Exception, e: print e success = p2p_tag_read(tag) if only_one and success: global continue_loop continue_loop = False else: summary("Not an NDEF tag - remove tag") return True return not no_wait def llcp_worker(llc): global init_on_touch if init_on_touch: print "Starting handover client" p2p_handover_client(llc) return global no_input if no_input: print "Wait for handover to complete" else: print "Wait for handover to complete - press 'i' to initiate ('w' for WPS only, 'p' for P2P only)" global srv global wait_connection while not wait_connection and srv.sent_carrier is None: if srv.ho_server_processing: time.sleep(0.025) elif no_input: time.sleep(0.5) else: global include_wps_req, include_p2p_req res = getch() if res == 'i': include_wps_req = True include_p2p_req = True elif res == 'p': include_wps_req = False include_p2p_req = True elif res == 'w': include_wps_req = True include_p2p_req = False else: continue clear_raw_mode() print "Starting handover client" p2p_handover_client(llc) return clear_raw_mode() print "Exiting llcp_worker thread" def llcp_startup(clf, llc): print "Start LLCP server" global srv srv = HandoverServer(llc) return llc def llcp_connected(llc): print "P2P LLCP connected" global wait_connection wait_connection = False global init_on_touch if not init_on_touch: global srv srv.start() if init_on_touch or not no_input: threading.Thread(target=llcp_worker, args=(llc,)).start() return True def terminate_loop(): global terminate_now return terminate_now def main(): clf = nfc.ContactlessFrontend() parser = argparse.ArgumentParser(description='nfcpy to wpa_supplicant integration for P2P and WPS NFC operations') parser.add_argument('-d', const=logging.DEBUG, default=logging.INFO, action='store_const', dest='loglevel', help='verbose debug output') parser.add_argument('-q', const=logging.WARNING, 
action='store_const', dest='loglevel', help='be quiet') parser.add_argument('--only-one', '-1', action='store_true', help='run only one operation and exit') parser.add_argument('--init-on-touch', '-I', action='store_true', help='initiate handover on touch') parser.add_argument('--no-wait', action='store_true', help='do not wait for tag to be removed before exiting') parser.add_argument('--ifname', '-i', help='network interface name') parser.add_argument('--no-wps-req', '-N', action='store_true', help='do not include WPS carrier record in request') parser.add_argument('--no-input', '-a', action='store_true', help='do not use stdout input to initiate handover') parser.add_argument('--tag-read-only', '-t', action='store_true', help='tag read only (do not allow connection handover)') parser.add_argument('--handover-only', action='store_true', help='connection handover only (do not allow tag read)') parser.add_argument('--freq', '-f', help='forced frequency of operating channel in MHz') parser.add_argument('--summary', help='summary file for writing status updates') parser.add_argument('--success', help='success file for writing success update') parser.add_argument('command', choices=['write-p2p-sel'], nargs='?') args = parser.parse_args() global only_one only_one = args.only_one global no_wait no_wait = args.no_wait global force_freq force_freq = args.freq logging.basicConfig(level=args.loglevel) global init_on_touch init_on_touch = args.init_on_touch if args.ifname: global ifname ifname = args.ifname print "Selected ifname " + ifname if args.no_wps_req: global include_wps_req include_wps_req = False if args.summary: global summary_file summary_file = args.summary if args.success: global success_file success_file = args.success if args.no_input: global no_input no_input = True clf = nfc.ContactlessFrontend() global wait_connection try: if not clf.open("usb"): print "Could not open connection with an NFC device" raise SystemExit if args.command == "write-p2p-sel": wps_write_p2p_handover_sel(clf, wait_remove=not args.no_wait) raise SystemExit global continue_loop while continue_loop: print "Waiting for a tag or peer to be touched" wait_connection = True try: if args.tag_read_only: if not clf.connect(rdwr={'on-connect': rdwr_connected}): break elif args.handover_only: if not clf.connect(llcp={'on-startup': llcp_startup, 'on-connect': llcp_connected}, terminate=terminate_loop): break else: if not clf.connect(rdwr={'on-connect': rdwr_connected}, llcp={'on-startup': llcp_startup, 'on-connect': llcp_connected}, terminate=terminate_loop): break except Exception, e: print "clf.connect failed" global srv if only_one and srv and srv.success: raise SystemExit except KeyboardInterrupt: raise SystemExit finally: clf.close() raise SystemExit if __name__ == '__main__': main()
HazyTeam/platform_external_wpa_supplicant_8
wpa_supplicant/examples/p2p-nfc.py
Python
gpl-2.0
20,200
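A minimal sketch of the wpa_supplicant control-interface pattern that wpas_connect() and the wpas_* helpers above rely on; the socket path and interface name are assumptions for illustration.

import wpaspy

ctrl_path = '/var/run/wpa_supplicant/wlan0'   # assumed control socket for one interface
wpas = wpaspy.Ctrl(ctrl_path)
print(wpas.request('PING'))                   # a healthy daemon answers 'PONG'
res = wpas.request('STATUS')
if 'FAIL' in res:
    print('command rejected')
else:
    print(res)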
# Author: Mr_Orange <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import sickbeard from .generic import GenericClient from requests.auth import HTTPDigestAuth class qbittorrentAPI(GenericClient): def __init__(self, host=None, username=None, password=None): super(qbittorrentAPI, self).__init__('qbittorrent', host, username, password) self.url = self.host self.session.auth = HTTPDigestAuth(self.username, self.password); def _get_auth(self): try: self.response = self.session.get(self.host, verify=False) self.auth = self.response.content except: return None return self.auth if not self.response.status_code == 404 else None def _add_torrent_uri(self, result): self.url = self.host+'command/download' data = {'urls': result.url} return self._request(method='post', data=data) def _add_torrent_file(self, result): self.url = self.host+'command/upload' files = {'torrents': (result.name + '.torrent', result.content)} return self._request(method='post', files=files) def _set_torrent_priority(self, result): self.url = self.host+'command/decreasePrio ' if result.priority == 1: self.url = self.host+'command/increasePrio' data = {'hashes': result.hash} return self._request(method='post', data=data) def _set_torrent_pause(self, result): self.url = self.host+'command/resume' if sickbeard.TORRENT_PAUSED: self.url = self.host+'command/pause' data = {'hash': result.hash} return self._request(method='post', data=data) api = qbittorrentAPI()
guijomatos/SickRage
sickbeard/clients/qbittorrent_client.py
Python
gpl-3.0
2,391
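A standalone sketch of the same 'command/download' call the client above makes, using requests directly. The host and credentials are placeholders, and this targets the older (pre-v4 Web API) qBittorrent endpoints that the client uses.

import requests
from requests.auth import HTTPDigestAuth

host = 'http://localhost:8080/'               # placeholder WebUI address
session = requests.Session()
session.auth = HTTPDigestAuth('admin', 'adminadmin')
resp = session.post(host + 'command/download',
                    data={'urls': 'http://example.com/file.torrent'},
                    verify=False)
print(resp.status_code)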
# # Created by: Pearu Peterson, September 2002 # import sys import subprocess import time from functools import reduce from numpy.testing import (assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal) import pytest from pytest import raises as assert_raises import numpy as np from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices, triu_indices) from numpy.random import rand, randint, seed from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky, solve, ldl, norm, block_diag, qr, eigh) from scipy.linalg.lapack import _compute_lwork from scipy.stats import ortho_group, unitary_group import scipy.sparse as sps try: from scipy.linalg import _clapack as clapack except ImportError: clapack = None from scipy.linalg.lapack import get_lapack_funcs from scipy.linalg.blas import get_blas_funcs REAL_DTYPES = [np.float32, np.float64] COMPLEX_DTYPES = [np.complex64, np.complex128] DTYPES = REAL_DTYPES + COMPLEX_DTYPES def generate_random_dtype_array(shape, dtype): # generates a random matrix of desired data type of shape if dtype in COMPLEX_DTYPES: return (np.random.rand(*shape) + np.random.rand(*shape)*1.0j).astype(dtype) return np.random.rand(*shape).astype(dtype) def test_lapack_documented(): """Test that all entries are in the doc.""" if lapack.__doc__ is None: # just in case there is a python -OO pytest.skip('lapack.__doc__ is None') names = set(lapack.__doc__.split()) ignore_list = set([ 'absolute_import', 'clapack', 'division', 'find_best_lapack_type', 'flapack', 'print_function', 'HAS_ILP64', ]) missing = list() for name in dir(lapack): if (not name.startswith('_') and name not in ignore_list and name not in names): missing.append(name) assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list' class TestFlapackSimple: def test_gebal(self): a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] a1 = [[1, 0, 0, 3e-4], [4, 0, 0, 2e-3], [7, 1, 0, 0], [0, 1, 0, 0]] for p in 'sdzc': f = getattr(flapack, p+'gebal', None) if f is None: continue ba, lo, hi, pivscale, info = f(a) assert_(not info, repr(info)) assert_array_almost_equal(ba, a) assert_equal((lo, hi), (0, len(a[0])-1)) assert_array_almost_equal(pivscale, np.ones(len(a))) ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1) assert_(not info, repr(info)) # print(a1) # print(ba, lo, hi, pivscale) def test_gehrd(self): a = [[-149, -50, -154], [537, 180, 546], [-27, -9, -25]] for p in 'd': f = getattr(flapack, p+'gehrd', None) if f is None: continue ht, tau, info = f(a) assert_(not info, repr(info)) def test_trsyl(self): a = np.array([[1, 2], [0, 4]]) b = np.array([[5, 6], [0, 8]]) c = np.array([[9, 10], [11, 12]]) trans = 'T' # Test single and double implementations, including most # of the options for dtype in 'fdFD': a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype) trsyl, = get_lapack_funcs(('trsyl',), (a1,)) if dtype.isupper(): # is complex dtype a1[0] += 1j trans = 'C' x, scale, info = trsyl(a1, b1, c1) assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), scale * c1) x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans) assert_array_almost_equal( np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T), scale * c1, decimal=4) x, scale, info = trsyl(a1, b1, c1, isgn=-1) assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), scale * c1, decimal=4) def test_lange(self): a = np.array([ [-149, -50, -154], [537, 180, 546], [-27, -9, -25]]) for dtype in 'fdFD': for norm_str in 'Mm1OoIiFfEe': a1 = a.astype(dtype) if dtype.isupper(): 
# is complex dtype a1[0, 0] += 1j lange, = get_lapack_funcs(('lange',), (a1,)) value = lange(norm_str, a1) if norm_str in 'FfEe': if dtype in 'Ff': decimal = 3 else: decimal = 7 ref = np.sqrt(np.sum(np.square(np.abs(a1)))) assert_almost_equal(value, ref, decimal) else: if norm_str in 'Mm': ref = np.max(np.abs(a1)) elif norm_str in '1Oo': ref = np.max(np.sum(np.abs(a1), axis=0)) elif norm_str in 'Ii': ref = np.max(np.sum(np.abs(a1), axis=1)) assert_equal(value, ref) class TestLapack: def test_flapack(self): if hasattr(flapack, 'empty_module'): # flapack module is empty pass def test_clapack(self): if hasattr(clapack, 'empty_module'): # clapack module is empty pass class TestLeastSquaresSolvers: def test_gels(self): seed(1234) # Test fat/tall matrix argument handling - gh-issue #8329 for ind, dtype in enumerate(DTYPES): m = 10 n = 20 nrhs = 1 a1 = rand(m, n).astype(dtype) b1 = rand(n).astype(dtype) gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype) # Request of sizes lwork = _compute_lwork(glslw, m, n, nrhs) _, _, info = gls(a1, b1, lwork=lwork) assert_(info >= 0) _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork) assert_(info >= 0) for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gels, gels_lwork, geqrf = get_lapack_funcs( ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes lwork = _compute_lwork(gels_lwork, m, n, nrhs) lqr, x, info = gels(a1, b1, lwork=lwork) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) lqr_truth, _, _, _ = geqrf(a1) assert_array_equal(lqr, lqr_truth) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gels, gels_lwork, geqrf = get_lapack_funcs( ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes lwork = _compute_lwork(gels_lwork, m, n, nrhs) lqr, x, info = gels(a1, b1, lwork=lwork) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) lqr_truth, _, _, _ = geqrf(a1) assert_array_equal(lqr, lqr_truth) def test_gelsd(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, iwork, info = gelsd_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) iwork_size = iwork x, s, rank, info = gelsd(a1, b1, lwork, iwork_size, -1, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([12.596017180511966, 0.583396253199685], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1) lwork = 
int(np.real(work)) rwork_size = int(rwork) iwork_size = iwork x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size, -1, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([13.035514762572043, 4.337666985231382], dtype=dtype), rtol=25*np.finfo(dtype).eps) def test_gelss(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelss_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([12.596017180511966, 0.583396253199685], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelss_lwork(m, n, nrhs, -1) lwork = int(np.real(work)) v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) assert_allclose(s, np.array([13.035514762572043, 4.337666985231382], dtype=dtype), rtol=25*np.finfo(dtype).eps) def test_gelsy(self): for dtype in REAL_DTYPES: a1 = np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype) b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) lwork = int(np.real(work)) jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, lwork, False, False) assert_allclose(x[:-1], np.array([-14.333333333333323, 14.999999999999991], dtype=dtype), rtol=25*np.finfo(dtype).eps) for dtype in COMPLEX_DTYPES: a1 = np.array([[1.0+4.0j, 2.0], [4.0+0.5j, 5.0-3.0j], [7.0-2.0j, 8.0+0.7j]], dtype=dtype) b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), (a1, b1)) m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 # Request of sizes work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) lwork = int(np.real(work)) jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, lwork, False, False) assert_allclose(x[:-1], np.array([1.161753632288328-1.901075709391912j, 1.735882340522193+1.521240901196909j], dtype=dtype), rtol=25*np.finfo(dtype).eps) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)]) def test_geqrf_lwork(dtype, shape): geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype) m, n = shape lwork, info = geqrf_lwork(m=m, n=n) assert_equal(info, 0) class TestRegression: def 
test_ticket_1645(self): # Check that RQ routines have correct lwork for dtype in DTYPES: a = np.zeros((300, 2), dtype=dtype) gerqf, = get_lapack_funcs(['gerqf'], [a]) assert_raises(Exception, gerqf, a, lwork=2) rq, tau, work, info = gerqf(a) if dtype in REAL_DTYPES: orgrq, = get_lapack_funcs(['orgrq'], [a]) assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1) orgrq(rq[-2:], tau, lwork=2) elif dtype in COMPLEX_DTYPES: ungrq, = get_lapack_funcs(['ungrq'], [a]) assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1) ungrq(rq[-2:], tau, lwork=2) class TestDpotr: def test_gh_2691(self): # 'lower' argument of dportf/dpotri for lower in [True, False]: for clean in [True, False]: np.random.seed(42) x = np.random.normal(size=(3, 3)) a = x.dot(x.T) dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, )) c, info = dpotrf(a, lower, clean=clean) dpt = dpotri(c, lower)[0] if lower: assert_allclose(np.tril(dpt), np.tril(inv(a))) else: assert_allclose(np.triu(dpt), np.triu(inv(a))) class TestDlasd4: def test_sing_val_update(self): sigmas = np.array([4., 3., 2., 0]) m_vec = np.array([3.12, 5.7, -4.8, -2.2]) M = np.hstack((np.vstack((np.diag(sigmas[0:-1]), np.zeros((1, len(m_vec) - 1)))), m_vec[:, np.newaxis])) SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False, check_finite=False) it_len = len(sigmas) sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)])) mvc = np.concatenate((m_vec[::-1], (0,))) lasd4 = get_lapack_funcs('lasd4', (sigmas,)) roots = [] for i in range(0, it_len): res = lasd4(i, sgm, mvc) roots.append(res[1]) assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \ the singular value %i" % i) roots = np.array(roots)[::-1] assert_((not np.any(np.isnan(roots)), "There are NaN roots")) assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps, rtol=100*np.finfo(np.float64).eps) class TestTbtrs: @pytest.mark.parametrize('dtype', DTYPES) def test_nag_example_f07vef_f07vsf(self, dtype): """Test real (f07vef) and complex (f07vsf) examples from NAG Examples available from: * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html """ if dtype in REAL_DTYPES: ab = np.array([[-4.16, 4.78, 6.32, 0.16], [-2.25, 5.86, -4.82, 0]], dtype=dtype) b = np.array([[-16.64, -4.16], [-13.78, -16.59], [13.10, -4.94], [-14.14, -9.96]], dtype=dtype) x_out = np.array([[4, 1], [-1, -3], [3, 2], [2, -2]], dtype=dtype) elif dtype in COMPLEX_DTYPES: ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j], [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0], [1.62+3.68j, -2.77-1.93j, 0, 0]], dtype=dtype) b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j], [-15.57 - 23.41j, -57.97 + 8.14j], [-7.63 + 22.78j, 19.09 - 29.51j], [-14.74 - 2.40j, 19.17 + 21.33j]], dtype=dtype) x_out = np.array([[2j, 1 + 5j], [1 - 3j, -7 - 2j], [-4.001887 - 4.988417j, 3.026830 + 4.003182j], [1.996158 - 1.045105j, -6.103357 - 8.986653j]], dtype=dtype) else: raise ValueError(f"Datatype {dtype} not understood.") tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype) x, info = tbtrs(ab=ab, b=b, uplo='L') assert_equal(info, 0) assert_allclose(x, x_out, rtol=0, atol=1e-5) @pytest.mark.parametrize('dtype,trans', [(dtype, trans) for dtype in DTYPES for trans in ['N', 'T', 'C'] if not (trans == 'C' and dtype in REAL_DTYPES)]) @pytest.mark.parametrize('uplo', ['U', 'L']) @pytest.mark.parametrize('diag', ['N', 'U']) def test_random_matrices(self, dtype, trans, uplo, diag): seed(1724) # n, nrhs, kd are used to specify A and b. 
# A is of shape n x n with kd super/sub-diagonals # b is of shape n x nrhs matrix n, nrhs, kd = 4, 3, 2 tbtrs = get_lapack_funcs('tbtrs', dtype=dtype) is_upper = (uplo == 'U') ku = kd * is_upper kl = kd - ku # Construct the diagonal and kd super/sub diagonals of A with # the corresponding offsets. band_offsets = range(ku, -kl - 1, -1) band_widths = [n - abs(x) for x in band_offsets] bands = [generate_random_dtype_array((width,), dtype) for width in band_widths] if diag == 'U': # A must be unit triangular bands[ku] = np.ones(n, dtype=dtype) # Construct the diagonal banded matrix A from the bands and offsets. a = sps.diags(bands, band_offsets, format='dia') # Convert A into banded storage form ab = np.zeros((kd + 1, n), dtype) for row, k in enumerate(band_offsets): ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k) # The RHS values. b = generate_random_dtype_array((n, nrhs), dtype) x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag) assert_equal(info, 0) if trans == 'N': assert_allclose(a @ x, b, rtol=5e-5) elif trans == 'T': assert_allclose(a.T @ x, b, rtol=5e-5) elif trans == 'C': assert_allclose(a.H @ x, b, rtol=5e-5) else: raise ValueError('Invalid trans argument') @pytest.mark.parametrize('uplo,trans,diag', [['U', 'N', 'Invalid'], ['U', 'Invalid', 'N'], ['Invalid', 'N', 'N']]) def test_invalid_argument_raises_exception(self, uplo, trans, diag): """Test if invalid values of uplo, trans and diag raise exceptions""" # Argument checks occur independently of used datatype. # This mean we must not parameterize all available datatypes. tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64) ab = rand(4, 2) b = rand(2, 4) assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag) def test_zero_element_in_diagonal(self): """Test if a matrix with a zero diagonal element is singular If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info` indicating the provided matrix is singular. Note that ?tbtrs requires the matrix A to be stored in banded form. In this form the diagonal corresponds to the last row.""" ab = np.ones((3, 4), dtype=float) b = np.ones(4, dtype=float) tbtrs = get_lapack_funcs('tbtrs', dtype=float) ab[-1, 3] = 0 _, info = tbtrs(ab=ab, b=b, uplo='U') assert_equal(info, 4) @pytest.mark.parametrize('ldab,n,ldb,nrhs', [ (5, 5, 0, 5), (5, 5, 3, 5) ]) def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs): """Test ?tbtrs fails correctly if shapes are invalid.""" ab = np.ones((ldab, n), dtype=float) b = np.ones((ldb, nrhs), dtype=float) tbtrs = get_lapack_funcs('tbtrs', dtype=float) assert_raises(Exception, tbtrs, ab, b) def test_lartg(): for dtype in 'fdFD': lartg = get_lapack_funcs('lartg', dtype=dtype) f = np.array(3, dtype) g = np.array(4, dtype) if np.iscomplexobj(g): g *= 1j cs, sn, r = lartg(f, g) assert_allclose(cs, 3.0/5.0) assert_allclose(r, 5.0) if np.iscomplexobj(g): assert_allclose(sn, -4.0j/5.0) assert_(type(r) == complex) assert_(type(cs) == float) else: assert_allclose(sn, 4.0/5.0) def test_rot(): # srot, drot from blas and crot and zrot from lapack. 
for dtype in 'fdFD': c = 0.6 s = 0.8 u = np.full(4, 3, dtype) v = np.full(4, 4, dtype) atol = 10**-(np.finfo(dtype).precision-1) if dtype in 'fd': rot = get_blas_funcs('rot', dtype=dtype) f = 4 else: rot = get_lapack_funcs('rot', dtype=dtype) s *= -1j v *= 1j f = 4j assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5], [0, 0, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3], [0, 0, f, f]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, offy=2), [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2), [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2), [[3, 3, 5, 5], [0, f, 0, f]], atol=atol) assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1), [[3, 3, 5, 3], [f, f, 0, f]], atol=atol) assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2), [[5, 3, 5, 3], [0, f, 0, f]], atol=atol) a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1) assert_(a is u) assert_(b is v) assert_allclose(a, [5, 5, 5, 5], atol=atol) assert_allclose(b, [0, 0, 0, 0], atol=atol) def test_larfg_larf(): np.random.seed(1234) a0 = np.random.random((4, 4)) a0 = a0.T.dot(a0) a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4)) a0j = a0j.T.conj().dot(a0j) # our test here will be to do one step of reducing a hermetian matrix to # tridiagonal form using householder transforms. for dtype in 'fdFD': larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype) if dtype in 'FD': a = a0j.copy() else: a = a0.copy() # generate a householder transform to clear a[2:,0] alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0]) # create expected output expected = np.zeros_like(a[:, 0]) expected[0] = a[0, 0] expected[1] = alpha # assemble householder vector v = np.zeros_like(a[1:, 0]) v[0] = 1.0 v[1:] = x # apply transform from the left a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1])) # apply transform from the right a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R') assert_allclose(a[:, 0], expected, atol=1e-5) assert_allclose(a[0, :], expected, atol=1e-5) @pytest.mark.xslow def test_sgesdd_lwork_bug_workaround(): # Test that SGESDD lwork is sufficiently large for LAPACK. # # This checks that workaround around an apparent LAPACK bug # actually works. cf. gh-5401 # # xslow: requires 1GB+ of memory p = subprocess.Popen([sys.executable, '-c', 'import numpy as np; ' 'from scipy.linalg import svd; ' 'a = np.zeros([9537, 9537], dtype=np.float32); ' 'svd(a)'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Check if it an error occurred within 5 sec; the computation can # take substantially longer, and we will not wait for it to finish for j in range(50): time.sleep(0.1) if p.poll() is not None: returncode = p.returncode break else: # Didn't exit in time -- probably entered computation. The # error is raised before entering computation, so things are # probably OK. 
returncode = 0 p.terminate() assert_equal(returncode, 0, "Code apparently failed: " + p.stdout.read().decode()) class TestSytrd: @pytest.mark.parametrize('dtype', REAL_DTYPES) def test_sytrd_with_zero_dim_array(self, dtype): # Assert that a 0x0 matrix raises an error A = np.zeros((0, 0), dtype=dtype) sytrd = get_lapack_funcs('sytrd', (A,)) assert_raises(ValueError, sytrd, A) @pytest.mark.parametrize('dtype', REAL_DTYPES) @pytest.mark.parametrize('n', (1, 3)) def test_sytrd(self, dtype, n): A = np.zeros((n, n), dtype=dtype) sytrd, sytrd_lwork = \ get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,)) # some upper triangular array A[np.triu_indices_from(A)] = \ np.arange(1, n*(n+1)//2+1, dtype=dtype) # query lwork lwork, info = sytrd_lwork(n) assert_equal(info, 0) # check lower=1 behavior (shouldn't do much since the matrix is # upper triangular) data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork) assert_equal(info, 0) assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0) assert_allclose(d, np.diag(A)) assert_allclose(e, 0.0) assert_allclose(tau, 0.0) # and now for the proper test (lower=0 is the default) data, d, e, tau, info = sytrd(A, lwork=lwork) assert_equal(info, 0) # assert Q^T*A*Q = tridiag(e, d, e) # build tridiagonal matrix T = np.zeros_like(A, dtype=dtype) k = np.arange(A.shape[0]) T[k, k] = d k2 = np.arange(A.shape[0]-1) T[k2+1, k2] = e T[k2, k2+1] = e # build Q Q = np.eye(n, n, dtype=dtype) for i in range(n-1): v = np.zeros(n, dtype=dtype) v[:i] = data[:i, i+1] v[i] = 1.0 H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v) Q = np.dot(H, Q) # Make matrix fully symmetric i_lower = np.tril_indices(n, -1) A[i_lower] = A.T[i_lower] QTAQ = np.dot(Q.T, np.dot(A, Q)) # disable rtol here since some values in QTAQ and T are very close # to 0. 
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0) class TestHetrd: @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES) def test_hetrd_with_zero_dim_array(self, complex_dtype): # Assert that a 0x0 matrix raises an error A = np.zeros((0, 0), dtype=complex_dtype) hetrd = get_lapack_funcs('hetrd', (A,)) assert_raises(ValueError, hetrd, A) @pytest.mark.parametrize('real_dtype,complex_dtype', zip(REAL_DTYPES, COMPLEX_DTYPES)) @pytest.mark.parametrize('n', (1, 3)) def test_hetrd(self, n, real_dtype, complex_dtype): A = np.zeros((n, n), dtype=complex_dtype) hetrd, hetrd_lwork = \ get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,)) # some upper triangular array A[np.triu_indices_from(A)] = ( np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype) ) np.fill_diagonal(A, np.real(np.diag(A))) # test query lwork for x in [0, 1]: _, info = hetrd_lwork(n, lower=x) assert_equal(info, 0) # lwork returns complex which segfaults hetrd call (gh-10388) # use the safe and recommended option lwork = _compute_lwork(hetrd_lwork, n) # check lower=1 behavior (shouldn't do much since the matrix is # upper triangular) data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork) assert_equal(info, 0) assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0) assert_allclose(d, np.real(np.diag(A))) assert_allclose(e, 0.0) assert_allclose(tau, 0.0) # and now for the proper test (lower=0 is the default) data, d, e, tau, info = hetrd(A, lwork=lwork) assert_equal(info, 0) # assert Q^T*A*Q = tridiag(e, d, e) # build tridiagonal matrix T = np.zeros_like(A, dtype=real_dtype) k = np.arange(A.shape[0], dtype=int) T[k, k] = d k2 = np.arange(A.shape[0]-1, dtype=int) T[k2+1, k2] = e T[k2, k2+1] = e # build Q Q = np.eye(n, n, dtype=complex_dtype) for i in range(n-1): v = np.zeros(n, dtype=complex_dtype) v[:i] = data[:i, i+1] v[i] = 1.0 H = np.eye(n, n, dtype=complex_dtype) \ - tau[i] * np.outer(v, np.conj(v)) Q = np.dot(H, Q) # Make matrix fully Hermitian i_lower = np.tril_indices(n, -1) A[i_lower] = np.conj(A.T[i_lower]) QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q)) # disable rtol here since some values in QTAQ and T are very close # to 0. 
assert_allclose( QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0 ) def test_gglse(): # Example data taken from NAG manual for ind, dtype in enumerate(DTYPES): # DTYPES = <s,d,c,z> gglse func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'), dtype=dtype) lwork = _compute_lwork(func_lwork, m=6, n=4, p=2) # For <s,d>gglse if ind < 2: a = np.array([[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.30, 0.24, 0.40, -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.30, 0.15, -2.13], [-0.02, 1.03, -1.43, 0.50]], dtype=dtype) c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype) d = np.array([0., 0.], dtype=dtype) # For <s,d>gglse else: a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j], [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j], [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j], [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j], [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j], [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]]) c = np.array([[-2.54+0.09j], [1.65-2.26j], [-2.11-3.96j], [1.82+3.30j], [-6.41+3.77j], [2.07+0.66j]]) d = np.zeros(2, dtype=dtype) b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype) _, _, _, result, _ = func(a, b, c, d, lwork=lwork) if ind < 2: expected = np.array([0.48904455, 0.99754786, 0.48904455, 0.99754786]) else: expected = np.array([1.08742917-1.96205783j, -0.74093902+3.72973919j, 1.08742917-1.96205759j, -0.74093896+3.72973895j]) assert_array_almost_equal(result, expected, decimal=4) def test_sycon_hecon(): seed(1234) for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): # DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon n = 10 # For <s,d,c,z>sycon if ind < 4: func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype) funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype) A = (rand(n, n)).astype(dtype) # For <c,z>hecon else: func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype) funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype) A = (rand(n, n) + rand(n, n)*1j).astype(dtype) # Since sycon only refers to upper/lower part, conj() is safe here. 
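        # Symmetrize (or Hermitize) A and shift it by 2*I so it stays well
        # conditioned; ?sycon/?hecon return rcond, an estimate of 1/cond_1(A),
        # which the assertion below compares against np.linalg.cond(A, p=1).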
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype) anorm = norm(A, 1) lwork = _compute_lwork(func_lwork, n) ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1) rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1) # The error is at most 1-fold assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1) def test_sygst(): seed(1234) for ind, dtype in enumerate(REAL_DTYPES): # DTYPES = <s,d> sygst n = 10 potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst', 'syevd', 'sygvd'), dtype=dtype) A = rand(n, n).astype(dtype) A = (A + A.T)/2 # B must be positive definite B = rand(n, n).astype(dtype) B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype) # Perform eig (sygvd) eig_gvd, _, info = sygvd(A, B) assert_(info == 0) # Convert to std problem potrf b, info = potrf(B) assert_(info == 0) a, info = sygst(A, b) assert_(info == 0) eig, _, info = syevd(a) assert_(info == 0) assert_allclose(eig, eig_gvd, rtol=1e-4) def test_hegst(): seed(1234) for ind, dtype in enumerate(COMPLEX_DTYPES): # DTYPES = <c,z> hegst n = 10 potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst', 'heevd', 'hegvd'), dtype=dtype) A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) A = (A + A.conj().T)/2 # B must be positive definite B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype) # Perform eig (hegvd) eig_gvd, _, info = hegvd(A, B) assert_(info == 0) # Convert to std problem potrf b, info = potrf(B) assert_(info == 0) a, info = hegst(A, b) assert_(info == 0) eig, _, info = heevd(a) assert_(info == 0) assert_allclose(eig, eig_gvd, rtol=1e-4) def test_tzrzf(): """ This test performs an RZ decomposition in which an m x n upper trapezoidal array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular and Z is unitary. """ seed(1234) m, n = 10, 15 for ind, dtype in enumerate(DTYPES): tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), dtype=dtype) lwork = _compute_lwork(tzrzf_lw, m, n) if ind < 2: A = triu(rand(m, n).astype(dtype)) else: A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype)) # assert wrong shape arg, f2py returns generic error assert_raises(Exception, tzrzf, A.T) rz, tau, info = tzrzf(A, lwork=lwork) # Check success assert_(info == 0) # Get Z manually for comparison R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype))) V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:])) Id = np.eye(n, dtype=dtype) ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)] Z = reduce(np.dot, ref) assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype), atol=10*np.spacing(dtype(1.0).real), rtol=0.) def test_tfsm(): """ Test for solving a linear system with the coefficient matrix is a triangular array stored in Full Packed (RFP) format. """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype) trans = 'C' else: A = triu(rand(n, n) + eye(n)).astype(dtype) trans = 'T' trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'), dtype=dtype) Afp, _ = trttf(A) B = rand(n, 2).astype(dtype) soln = tfsm(-1, Afp, B) assert_array_almost_equal(soln, solve(-A, B), decimal=4 if ind % 2 == 0 else 6) soln = tfsm(-1, Afp, B, trans=trans) assert_array_almost_equal(soln, solve(-A.conj().T, B), decimal=4 if ind % 2 == 0 else 6) # Make A, unit diagonal A[np.arange(n), np.arange(n)] = dtype(1.) 
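        # With diag='U' the RFP solver treats the triangular factor as unit
        # diagonal (the diagonal entries stored in Afp are not referenced),
        # which is why the reference solve() uses A with its diagonal
        # explicitly set to one above.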
soln = tfsm(-1, Afp, B, trans=trans, diag='U') assert_array_almost_equal(soln, solve(-A.conj().T, B), decimal=4 if ind % 2 == 0 else 6) # Change side B2 = rand(3, n).astype(dtype) soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R') assert_array_almost_equal(soln, solve(-A, B2.T).conj().T, decimal=4 if ind % 2 == 0 else 6) def test_ormrz_unmrz(): """ This test performs a matrix multiplication with an arbitrary m x n matric C and a unitary matrix Q without explicitly forming the array. The array data is encoded in the rectangular part of A which is obtained from ?TZRZF. Q size is inferred by m, n, side keywords. """ seed(1234) qm, qn, cn = 10, 15, 15 for ind, dtype in enumerate(DTYPES): tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), dtype=dtype) lwork_rz = _compute_lwork(tzrzf_lw, qm, qn) if ind < 2: A = triu(rand(qm, qn).astype(dtype)) C = rand(cn, cn).astype(dtype) orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'), dtype=dtype) else: A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype)) C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype) orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'), dtype=dtype) lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn) rz, tau, info = tzrzf(A, lwork=lwork_rz) # Get Q manually for comparison V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:])) Id = np.eye(qn, dtype=dtype) ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)] Q = reduce(np.dot, ref) # Now that we have Q, we can test whether lapack results agree with # each case of CQ, CQ^H, QC, and QC^H trans = 'T' if ind < 2 else 'C' tol = 10*np.spacing(dtype(1.0).real) cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz) assert_(info == 0) assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.) cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz) assert_(info == 0) assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol, rtol=0.) cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz) assert_(info == 0) assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.) cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz) assert_(info == 0) assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol, rtol=0.) def test_tfttr_trttf(): """ Test conversion routines between the Rectengular Full Packed (RFP) format and Standard Triangular Array (TR) """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) transr = 'C' else: A_full = (rand(n, n)).astype(dtype) transr = 'T' trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype) A_tf_U, info = trttf(A_full) assert_(info == 0) A_tf_L, info = trttf(A_full, uplo='L') assert_(info == 0) A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U') assert_(info == 0) A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L') assert_(info == 0) # Create the RFP array manually (n is even!) 
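        # Layout sketch for even n (as assembled below): the upper RFP array
        # is (n+1) x (n//2); its first n rows hold the last n//2 columns of
        # triu(A), and rows n//2+1 onward additionally hold the leading
        # n//2 x n//2 triangle (conjugate-)transposed.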
A_tf_U_m = zeros((n+1, n//2), dtype=dtype) A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:] A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T A_tf_L_m = zeros((n+1, n//2), dtype=dtype) A_tf_L_m[1:, :] = tril(A_full)[:, :n//2] A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F')) assert_array_almost_equal(A_tf_U_T, A_tf_U_m.conj().T.reshape(-1, order='F')) assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F')) assert_array_almost_equal(A_tf_L_T, A_tf_L_m.conj().T.reshape(-1, order='F')) # Get the original array from RFP A_tr_U, info = tfttr(n, A_tf_U) assert_(info == 0) A_tr_L, info = tfttr(n, A_tf_L, uplo='L') assert_(info == 0) A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U') assert_(info == 0) A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L') assert_(info == 0) assert_array_almost_equal(A_tr_U, triu(A_full)) assert_array_almost_equal(A_tr_U_T, triu(A_full)) assert_array_almost_equal(A_tr_L, tril(A_full)) assert_array_almost_equal(A_tr_L_T, tril(A_full)) def test_tpttr_trttp(): """ Test conversion routines between the Rectengular Full Packed (RFP) format and Standard Triangular Array (TR) """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) else: A_full = (rand(n, n)).astype(dtype) trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype) A_tp_U, info = trttp(A_full) assert_(info == 0) A_tp_L, info = trttp(A_full, uplo='L') assert_(info == 0) # Create the TP array manually inds = tril_indices(n) A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype) A_tp_U_m[:] = (triu(A_full).T)[inds] inds = triu_indices(n) A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype) A_tp_L_m[:] = (tril(A_full).T)[inds] assert_array_almost_equal(A_tp_U, A_tp_U_m) assert_array_almost_equal(A_tp_L, A_tp_L_m) # Get the original array from TP A_tr_U, info = tpttr(n, A_tp_U) assert_(info == 0) A_tr_L, info = tpttr(n, A_tp_L, uplo='L') assert_(info == 0) assert_array_almost_equal(A_tr_U, triu(A_full)) assert_array_almost_equal(A_tr_L, tril(A_full)) def test_pftrf(): """ Test Cholesky factorization of a positive definite Rectengular Full Packed (RFP) format array """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) A = A + A.conj().T + n*eye(n) else: A = (rand(n, n)).astype(dtype) A = A + A.T + n*eye(n) pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'), dtype=dtype) # Get the original array from TP Afp, info = trttf(A) Achol_rfp, info = pftrf(n, Afp) assert_(info == 0) A_chol_r, _ = tfttr(n, Achol_rfp) Achol = cholesky(A) assert_array_almost_equal(A_chol_r, Achol) def test_pftri(): """ Test Cholesky factorization of a positive definite Rectengular Full Packed (RFP) format array to find its inverse """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) A = A + A.conj().T + n*eye(n) else: A = (rand(n, n)).astype(dtype) A = A + A.T + n*eye(n) pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri', 'pftrf', 'trttf', 'tfttr'), dtype=dtype) # Get the original array from TP Afp, info = trttf(A) A_chol_rfp, info = pftrf(n, Afp) A_inv_rfp, info = pftri(n, A_chol_rfp) assert_(info == 0) A_inv_r, _ = tfttr(n, A_inv_rfp) Ainv = inv(A) assert_array_almost_equal(A_inv_r, triu(Ainv), decimal=4 if ind % 2 == 0 else 6) def test_pftrs(): """ Test Cholesky factorization of a positive definite Rectengular Full Packed (RFP) format array 
and solve a linear system """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) A = A + A.conj().T + n*eye(n) else: A = (rand(n, n)).astype(dtype) A = A + A.T + n*eye(n) B = ones((n, 3), dtype=dtype) Bf1 = ones((n+2, 3), dtype=dtype) Bf2 = ones((n-2, 3), dtype=dtype) pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs', 'pftrf', 'trttf', 'tfttr'), dtype=dtype) # Get the original array from TP Afp, info = trttf(A) A_chol_rfp, info = pftrf(n, Afp) # larger B arrays shouldn't segfault soln, info = pftrs(n, A_chol_rfp, Bf1) assert_(info == 0) assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2) soln, info = pftrs(n, A_chol_rfp, B) assert_(info == 0) assert_array_almost_equal(solve(A, B), soln, decimal=4 if ind % 2 == 0 else 6) def test_sfrk_hfrk(): """ Test for performing a symmetric rank-k operation for matrix in RFP format. """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) A = A + A.conj().T + n*eye(n) else: A = (rand(n, n)).astype(dtype) A = A + A.T + n*eye(n) prefix = 's'if ind < 2 else 'h' trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk' ''.format(prefix)), dtype=dtype) Afp, _ = trttf(A) C = np.random.rand(n, 2).astype(dtype) Afp_out = shfrk(n, 2, -1, C, 2, Afp) A_out, _ = tfttr(n, Afp_out) assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A), decimal=4 if ind % 2 == 0 else 6) def test_syconv(): """ Test for going back and forth between the returned format of he/sytrf to L and D factors/permutations. """ seed(1234) for ind, dtype in enumerate(DTYPES): n = 10 if ind > 1: A = (randint(-30, 30, (n, n)) + randint(-30, 30, (n, n))*1j).astype(dtype) A = A + A.conj().T else: A = randint(-30, 30, (n, n)).astype(dtype) A = A + A.T + n*eye(n) tol = 100*np.spacing(dtype(1.0).real) syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf', 'sytrf_lwork'), dtype=dtype) lw = _compute_lwork(trf_lwork, n, lower=1) L, D, perm = ldl(A, lower=1, hermitian=False) lw = _compute_lwork(trf_lwork, n, lower=1) ldu, ipiv, info = trf(A, lower=1, lwork=lw) a, e, info = syconv(ldu, ipiv, lower=1) assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.) # Test also upper U, D, perm = ldl(A, lower=0, hermitian=False) ldu, ipiv, info = trf(A, lower=0) a, e, info = syconv(ldu, ipiv, lower=0) assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.) class TestBlockedQR: """ Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt and tpmqr. """ def test_geqrt_gemqrt(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) else: A = (rand(n, n)).astype(dtype) tol = 100*np.spacing(dtype(1.0).real) geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype) a, t, info = geqrt(n, A) assert(info == 0) # Extract elementary reflectors from lower triangle, adding the # main diagonal of ones. v = np.tril(a, -1) + np.eye(n, dtype=dtype) # Generate the block Householder transform I - VTV^H Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj() R = np.triu(a) # Test columns of Q are orthogonal assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol, rtol=0.) assert_allclose(Q @ R, A, atol=tol, rtol=0.) 
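            # gemqrt applies Q or its (conjugate) transpose to C from either
            # side using only the compact (a, t) representation, without
            # forming Q explicitly; compare against the explicit products.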
if ind > 1: C = (rand(n, n) + rand(n, n)*1j).astype(dtype) transpose = 'C' else: C = (rand(n, n)).astype(dtype) transpose = 'T' for side in ('L', 'R'): for trans in ('N', transpose): c, info = gemqrt(a, t, C, side=side, trans=trans) assert(info == 0) if trans == transpose: q = Q.T.conj() else: q = Q if side == 'L': qC = q @ C else: qC = C @ q assert_allclose(c, qC, atol=tol, rtol=0.) # Test default arguments if (side, trans) == ('L', 'N'): c_default, info = gemqrt(a, t, C) assert(info == 0) assert_equal(c_default, c) # Test invalid side/trans assert_raises(Exception, gemqrt, a, t, C, side='A') assert_raises(Exception, gemqrt, a, t, C, trans='A') def test_tpqrt_tpmqrt(self): seed(1234) for ind, dtype in enumerate(DTYPES): n = 20 if ind > 1: A = (rand(n, n) + rand(n, n)*1j).astype(dtype) B = (rand(n, n) + rand(n, n)*1j).astype(dtype) else: A = (rand(n, n)).astype(dtype) B = (rand(n, n)).astype(dtype) tol = 100*np.spacing(dtype(1.0).real) tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype) # Test for the range of pentagonal B, from square to upper # triangular for l in (0, n // 2, n): a, b, t, info = tpqrt(l, n, A, B) assert(info == 0) # Check that lower triangular part of A has not been modified assert_equal(np.tril(a, -1), np.tril(A, -1)) # Check that elements not part of the pentagonal portion of B # have not been modified. assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1)) # Extract pentagonal portion of B B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n) # Generate elementary reflectors v = np.concatenate((np.eye(n, dtype=dtype), b_pent)) # Generate the block Householder transform I - VTV^H Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj() R = np.concatenate((np.triu(a), np.zeros_like(a))) # Test columns of Q are orthogonal assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype), atol=tol, rtol=0.) assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)), atol=tol, rtol=0.) if ind > 1: C = (rand(n, n) + rand(n, n)*1j).astype(dtype) D = (rand(n, n) + rand(n, n)*1j).astype(dtype) transpose = 'C' else: C = (rand(n, n)).astype(dtype) D = (rand(n, n)).astype(dtype) transpose = 'T' for side in ('L', 'R'): for trans in ('N', transpose): c, d, info = tpmqrt(l, b, t, C, D, side=side, trans=trans) assert(info == 0) if trans == transpose: q = Q.T.conj() else: q = Q if side == 'L': cd = np.concatenate((c, d), axis=0) CD = np.concatenate((C, D), axis=0) qCD = q @ CD else: cd = np.concatenate((c, d), axis=1) CD = np.concatenate((C, D), axis=1) qCD = CD @ q assert_allclose(cd, qCD, atol=tol, rtol=0.) if (side, trans) == ('L', 'N'): c_default, d_default, info = tpmqrt(l, b, t, C, D) assert(info == 0) assert_equal(c_default, c) assert_equal(d_default, d) # Test invalid side/trans assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A') assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A') def test_pstrf(): seed(1234) for ind, dtype in enumerate(DTYPES): # DTYPES = <s, d, c, z> pstrf n = 10 r = 2 pstrf = get_lapack_funcs('pstrf', dtype=dtype) # Create positive semidefinite A if ind > 1: A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype) A = A @ A.conj().T else: A = rand(n, n-r).astype(dtype) A = A @ A.T c, piv, r_c, info = pstrf(A) U = triu(c) U[r_c - n:, r_c - n:] = 0. assert_equal(info, 1) # python-dbg 3.5.2 runs cause trouble with the following assertion. 
# assert_equal(r_c, n - r) single_atol = 1000 * np.finfo(np.float32).eps double_atol = 1000 * np.finfo(np.float64).eps atol = single_atol if ind in [0, 2] else double_atol assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol) c, piv, r_c, info = pstrf(A, lower=1) L = tril(c) L[r_c - n:, r_c - n:] = 0. assert_equal(info, 1) # assert_equal(r_c, n - r) single_atol = 1000 * np.finfo(np.float32).eps double_atol = 1000 * np.finfo(np.float64).eps atol = single_atol if ind in [0, 2] else double_atol assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol) def test_pstf2(): seed(1234) for ind, dtype in enumerate(DTYPES): # DTYPES = <s, d, c, z> pstf2 n = 10 r = 2 pstf2 = get_lapack_funcs('pstf2', dtype=dtype) # Create positive semidefinite A if ind > 1: A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype) A = A @ A.conj().T else: A = rand(n, n-r).astype(dtype) A = A @ A.T c, piv, r_c, info = pstf2(A) U = triu(c) U[r_c - n:, r_c - n:] = 0. assert_equal(info, 1) # python-dbg 3.5.2 runs cause trouble with the commented assertions. # assert_equal(r_c, n - r) single_atol = 1000 * np.finfo(np.float32).eps double_atol = 1000 * np.finfo(np.float64).eps atol = single_atol if ind in [0, 2] else double_atol assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol) c, piv, r_c, info = pstf2(A, lower=1) L = tril(c) L[r_c - n:, r_c - n:] = 0. assert_equal(info, 1) # assert_equal(r_c, n - r) single_atol = 1000 * np.finfo(np.float32).eps double_atol = 1000 * np.finfo(np.float64).eps atol = single_atol if ind in [0, 2] else double_atol assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol) def test_geequ(): desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269], [1.0000, -0.5619, -1.0000, -1.0000], [0.5874, -1.0000, -0.0596, -0.5341], [-1.0000, -0.5946, -0.0294, 0.9957]]) desired_cplx = np.array([[-0.2816+0.5359*1j, 0.0812+0.9188*1j, -0.7439-0.2561*1j], [-0.3562-0.2954*1j, 0.9566-0.0434*1j, -0.0174+0.1555*1j], [0.8607+0.1393*1j, -0.2759+0.7241*1j, -0.1642-0.1365*1j]]) for ind, dtype in enumerate(DTYPES): if ind < 2: # Use examples from the NAG documentation A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09], [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00], [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00], [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]]) A = A.astype(dtype) else: A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00], [-1.70e+00, 3.31e+10, -0.15e+00], [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype) A += np.array([[2.55e+00, 3.17e+10, -2.20e+00], [-1.41e+00, -0.15e+10, 1.34e+00], [0.39e-10, 1.47e+00, -0.69e-10]])*1j A = A.astype(dtype) geequ = get_lapack_funcs('geequ', dtype=dtype) r, c, rowcnd, colcnd, amax, info = geequ(A) if ind < 2: assert_allclose(desired_real.astype(dtype), r[:, None]*A*c, rtol=0, atol=1e-4) else: assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c, rtol=0, atol=1e-4) def test_syequb(): desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3]) for ind, dtype in enumerate(DTYPES): A = np.eye(10, dtype=dtype) alpha = dtype(1. 
if ind < 2 else 1.j) d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype) A += np.rot90(np.diag(d)) syequb = get_lapack_funcs('syequb', dtype=dtype) s, scond, amax, info = syequb(A) assert_equal(np.log2(s).astype(int), desired_log2s) @pytest.mark.skipif(True, reason="Failing on some OpenBLAS version, see gh-12276") def test_heequb(): # zheequb has a bug for versions =< LAPACK 3.9.0 # See Reference-LAPACK gh-61 and gh-408 # Hence the zheequb test is customized accordingly to avoid # work scaling. A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j s, scond, amax, info = lapack.zheequb(A) assert_equal(info, 0) assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5) A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j) A[5, 5] = 1024 A[5, 0] = 16j s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1) assert_equal(info, 0) assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2]) def test_getc2_gesc2(): np.random.seed(42) n = 10 desired_real = np.random.rand(n) desired_cplx = np.random.rand(n) + np.random.rand(n)*1j for ind, dtype in enumerate(DTYPES): if ind < 2: A = np.random.rand(n, n) A = A.astype(dtype) b = A @ desired_real b = b.astype(dtype) else: A = np.random.rand(n, n) + np.random.rand(n, n)*1j A = A.astype(dtype) b = A @ desired_cplx b = b.astype(dtype) getc2 = get_lapack_funcs('getc2', dtype=dtype) gesc2 = get_lapack_funcs('gesc2', dtype=dtype) lu, ipiv, jpiv, info = getc2(A, overwrite_a=0) x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0) if ind < 2: assert_array_almost_equal(desired_real.astype(dtype), x/scale, decimal=4) else: assert_array_almost_equal(desired_cplx.astype(dtype), x/scale, decimal=4) @pytest.mark.parametrize('size', [(6, 5), (5, 5)]) @pytest.mark.parametrize('dtype', REAL_DTYPES) @pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R' @pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N' @pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N' @pytest.mark.parametrize('jobr', [0, 1]) @pytest.mark.parametrize('jobp', [0, 1]) def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0): """Test the lapack routine ?gejsv. This function tests that a singular value decomposition can be performed on the random M-by-N matrix A. The test performs the SVD using ?gejsv then performs the following checks: * ?gejsv exist successfully (info == 0) * The returned singular values are correct * `A` can be reconstructed from `u`, `SIGMA`, `v` * Ensure that u.T @ u is the identity matrix * Ensure that v.T @ v is the identity matrix * The reported matrix rank * The reported number of singular values * If denormalized floats are required Notes ----- joba specifies several choices effecting the calculation's accuracy Although all arguments are tested, the tests only check that the correct solution is returned - NOT that the prescribed actions are performed internally. jobt is, as of v3.9.0, still experimental and removed to cut down number of test cases. However keyword itself is tested externally. """ seed(42) # Define some constants for later use: m, n = size atol = 100 * np.finfo(dtype).eps A = generate_random_dtype_array(size, dtype) gejsv = get_lapack_funcs('gejsv', dtype=dtype) # Set up checks for invalid job? combinations # if an invalid combination occurs we set the appropriate # exit status. 
lsvec = jobu < 2 # Calculate left singular vectors rsvec = jobv < 2 # Calculate right singular vectors l2tran = (jobt == 1) and (m == n) is_complex = np.iscomplexobj(A) invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex) invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex # Set the exit status to the expected value. # Here we only check for invalid combinations, not individual # parameters. if invalid_cplx_jobu: exit_status = -2 elif invalid_real_jobv or invalid_cplx_jobv: exit_status = -3 else: exit_status = 0 if (jobu > 1) and (jobv == 1): assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp) else: sva, u, v, work, iwork, info = gejsv(A, joba=joba, jobu=jobu, jobv=jobv, jobr=jobr, jobt=jobt, jobp=jobp) # Check that ?gejsv exited successfully/as expected assert_equal(info, exit_status) # If exit_status is non-zero the combination of jobs is invalid. # We test this above but no calculations are performed. if not exit_status: # Check the returned singular values sigma = (work[0] / work[1]) * sva[:n] assert_allclose(sigma, svd(A, compute_uv=False), atol=atol) if jobu == 1: # If JOBU = 'F', then u contains the M-by-M matrix of # the left singular vectors, including an ONB of the orthogonal # complement of the Range(A) # However, to recalculate A we are concerned about the # first n singular values and so can ignore the latter. # TODO: Add a test for ONB? u = u[:, :n] if lsvec and rsvec: assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol) if lsvec: assert_allclose(u.conj().T @ u, np.identity(n), atol=atol) if rsvec: assert_allclose(v.conj().T @ v, np.identity(n), atol=atol) assert_equal(iwork[0], np.linalg.matrix_rank(A)) assert_equal(iwork[1], np.count_nonzero(sigma)) # iwork[2] is non-zero if requested accuracy is not warranted for # the data. This should never occur for these tests. assert_equal(iwork[2], 0) @pytest.mark.parametrize('dtype', REAL_DTYPES) def test_gejsv_edge_arguments(dtype): """Test edge arguments return expected status""" gejsv = get_lapack_funcs('gejsv', dtype=dtype) # scalar A sva, u, v, work, iwork, info = gejsv(1.) 
assert_equal(info, 0) assert_equal(u.shape, (1, 1)) assert_equal(v.shape, (1, 1)) assert_equal(sva, np.array([1.], dtype=dtype)) # 1d A A = np.ones((1,), dtype=dtype) sva, u, v, work, iwork, info = gejsv(A) assert_equal(info, 0) assert_equal(u.shape, (1, 1)) assert_equal(v.shape, (1, 1)) assert_equal(sva, np.array([1.], dtype=dtype)) # 2d empty A A = np.ones((1, 0), dtype=dtype) sva, u, v, work, iwork, info = gejsv(A) assert_equal(info, 0) assert_equal(u.shape, (1, 0)) assert_equal(v.shape, (1, 0)) assert_equal(sva, np.array([], dtype=dtype)) # make sure "overwrite_a" is respected - user reported in gh-13191 A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype) A = np.asfortranarray(A + A.T) # make it symmetric and column major Ac = A.copy('A') _ = gejsv(A) assert_allclose(A, Ac) @pytest.mark.parametrize(('kwargs'), ({'joba': 9}, {'jobu': 9}, {'jobv': 9}, {'jobr': 9}, {'jobt': 9}, {'jobp': 9}) ) def test_gejsv_invalid_job_arguments(kwargs): """Test invalid job arguments raise an Exception""" A = np.ones((2, 2), dtype=float) gejsv = get_lapack_funcs('gejsv', dtype=float) assert_raises(Exception, gejsv, A, **kwargs) @pytest.mark.parametrize("A,sva_expect,u_expect,v_expect", [(np.array([[2.27, -1.54, 1.15, -1.94], [0.28, -1.67, 0.94, -0.78], [-0.48, -3.09, 0.99, -0.21], [1.07, 1.22, 0.79, 0.63], [-2.35, 2.93, -1.45, 2.30], [0.62, -7.39, 1.03, -2.57]]), np.array([9.9966, 3.6831, 1.3569, 0.5000]), np.array([[0.2774, -0.6003, -0.1277, 0.1323], [0.2020, -0.0301, 0.2805, 0.7034], [0.2918, 0.3348, 0.6453, 0.1906], [-0.0938, -0.3699, 0.6781, -0.5399], [-0.4213, 0.5266, 0.0413, -0.0575], [0.7816, 0.3353, -0.1645, -0.3957]]), np.array([[0.1921, -0.8030, 0.0041, -0.5642], [-0.8794, -0.3926, -0.0752, 0.2587], [0.2140, -0.2980, 0.7827, 0.5027], [-0.3795, 0.3351, 0.6178, -0.6017]]))]) def test_gejsv_NAG(A, sva_expect, u_expect, v_expect): """ This test implements the example found in the NAG manual, f08khf. An example was not found for the complex case. """ # NAG manual provides accuracy up to 4 decimals atol = 1e-4 gejsv = get_lapack_funcs('gejsv', dtype=A.dtype) sva, u, v, work, iwork, info = gejsv(A) assert_allclose(sva_expect, sva, atol=atol) assert_allclose(u_expect, u, atol=atol) assert_allclose(v_expect, v, atol=atol) @pytest.mark.parametrize("dtype", DTYPES) def test_gttrf_gttrs(dtype): # The test uses ?gttrf and ?gttrs to solve a random system for each dtype, # tests that the output of ?gttrf define LU matricies, that input # parameters are unmodified, transposal options function correctly, that # incompatible matrix shapes raise an error, and singular matrices return # non zero info. 
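    # Storage convention relied on below: ?gttrf returns A = L*U with U stored
    # as its main diagonal _d, first superdiagonal _du and second
    # superdiagonal du2, while L is encoded as a sequence of row interchanges
    # (ipiv) and unit lower-bidiagonal multipliers (_dl); the loop further
    # down rebuilds L and U from these pieces to check the factorization.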
seed(42) n = 10 atol = 100 * np.finfo(dtype).eps # create the matrix in accordance with the data type du = generate_random_dtype_array((n-1,), dtype=dtype) d = generate_random_dtype_array((n,), dtype=dtype) dl = generate_random_dtype_array((n-1,), dtype=dtype) diag_cpy = [dl.copy(), d.copy(), du.copy()] A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1) x = np.random.rand(n) b = A @ x gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype) _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du) # test to assure that the inputs of ?gttrf are unmodified assert_array_equal(dl, diag_cpy[0]) assert_array_equal(d, diag_cpy[1]) assert_array_equal(du, diag_cpy[2]) # generate L and U factors from ?gttrf return values # L/U are lower/upper triangular by construction (initially and at end) U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2) L = np.eye(n, dtype=dtype) for i, m in enumerate(_dl): # L is given in a factored form. # See # www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html piv = ipiv[i] - 1 # right multiply by permutation matrix L[:, [i, piv]] = L[:, [piv, i]] # right multiply by Li, rank-one modification of identity L[:, i] += L[:, i+1]*m # one last permutation i, piv = -1, ipiv[-1] - 1 # right multiply by final permutation matrix L[:, [i, piv]] = L[:, [piv, i]] # check that the outputs of ?gttrf define an LU decomposition of A assert_allclose(A, L @ U, atol=atol) b_cpy = b.copy() x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b) # test that the inputs of ?gttrs are unmodified assert_array_equal(b, b_cpy) # test that the result of ?gttrs matches the expected input assert_allclose(x, x_gttrs, atol=atol) # test that ?gttrf and ?gttrs work with transposal options if dtype in REAL_DTYPES: trans = "T" b_trans = A.T @ x else: trans = "C" b_trans = A.conj().T @ x x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans) assert_allclose(x, x_gttrs, atol=atol) # test that ValueError is raised with incompatible matrix shapes with assert_raises(ValueError): gttrf(dl[:-1], d, du) with assert_raises(ValueError): gttrf(dl, d[:-1], du) with assert_raises(ValueError): gttrf(dl, d, du[:-1]) # test that matrix of size n=2 raises exception with assert_raises(Exception): gttrf(dl[0], d[:1], du[0]) # test that singular (row of all zeroes) matrix fails via info du[0] = 0 d[0] = 0 __dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du) np.testing.assert_(__d[info - 1] == 0, "?gttrf: _d[info-1] is {}, not the illegal value :0." 
.format(__d[info - 1])) @pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x", [(np.array([2.1, -1.0, 1.9, 8.0]), np.array([3.0, 2.3, -5.0, -.9, 7.1]), np.array([3.4, 3.6, 7.0, -6.0]), np.array([2.3, -5, -.9, 7.1]), np.array([3.4, 3.6, 7, -6, -1.015373]), np.array([-1, 1.9, 8]), np.array([2, 3, 4, 5, 5]), np.array([[2.7, 6.6], [-0.5, 10.8], [2.6, -3.2], [0.6, -11.2], [2.7, 19.1] ]), np.array([[-4, 5], [7, -4], [3, -3], [-4, -2], [-3, 1]])), ( np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]), np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j, - .3 + 4.3j, -3.3 + 1.3j]), np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]), # du exp np.array([-1.3 + 1.3j, -1.3 + 3.3j, -0.3 + 4.3j, -3.3 + 1.3j]), np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j, -1.3399 + 0.2875j]), np.array([2 + 1j, -1 + 1j, 1 - 1j]), np.array([2, 3, 4, 5, 5]), np.array([[2.4 - 5j, 2.7 + 6.9j], [3.4 + 18.2j, - 6.9 - 5.3j], [-14.7 + 9.7j, - 6 - .6j], [31.9 - 7.7j, -3.9 + 9.3j], [-1 + 1.6j, -3 + 12.2j]]), np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j], [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j], [1 - 1j, 2 - 2j]]) )]) def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x): # test to assure that wrapper is consistent with NAG Library Manual Mark 26 # example problems: f07cdf and f07cef (real) # examples: f07crf and f07csf (complex) # (Links may expire, so search for "NAG Library Manual Mark 26" online) gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0])) _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du) assert_allclose(du2, du2_exp) assert_allclose(_du, du_exp) assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals. assert_allclose(ipiv, ipiv_exp) x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b) assert_allclose(x_gttrs, x) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)]) def test_geqrfp_lwork(dtype, shape): geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype) m, n = shape lwork, info = geqrfp_lwork(m=m, n=n) assert_equal(info, 0) @pytest.mark.parametrize("ddtype,dtype", zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) def test_pttrf_pttrs(ddtype, dtype): seed(42) # set test tolerance appropriate for dtype atol = 100*np.finfo(dtype).eps # n is the length diagonal of A n = 10 # create diagonals according to size and dtype # diagonal d should always be real. # add 4 to d so it will be dominant for all dtypes d = generate_random_dtype_array((n,), ddtype) + 4 # diagonal e may be real or complex. 
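    # For complex dtypes A is assembled as a Hermitian tridiagonal matrix
    # (subdiagonal e, superdiagonal conj(e)), matching the L*D*L^H
    # factorization that ?pttrf computes and that is verified below.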
e = generate_random_dtype_array((n-1,), dtype) # assemble diagonals together into matrix A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) # store a copy of diagonals to later verify diag_cpy = [d.copy(), e.copy()] pttrf = get_lapack_funcs('pttrf', dtype=dtype) _d, _e, info = pttrf(d, e) # test to assure that the inputs of ?pttrf are unmodified assert_array_equal(d, diag_cpy[0]) assert_array_equal(e, diag_cpy[1]) assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info)) # test that the factors from pttrf can be recombined to make A L = np.diag(_e, -1) + np.diag(np.ones(n)) D = np.diag(_d) assert_allclose(A, L@[email protected]().T, atol=atol) # generate random solution x x = generate_random_dtype_array((n,), dtype) # determine accompanying b to get soln x b = A@x # determine _x from pttrs pttrs = get_lapack_funcs('pttrs', dtype=dtype) _x, info = pttrs(_d, _e.conj(), b) assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info)) # test that _x from pttrs matches the expected x assert_allclose(x, _x, atol=atol) @pytest.mark.parametrize("ddtype,dtype", zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype): n = 10 pttrf = get_lapack_funcs('pttrf', dtype=dtype) d = generate_random_dtype_array((n,), ddtype) + 2 e = generate_random_dtype_array((n-1,), dtype) # test that ValueError is raised with incompatible matrix shapes assert_raises(ValueError, pttrf, d[:-1], e) assert_raises(ValueError, pttrf, d, e[:-1]) @pytest.mark.parametrize("ddtype,dtype", zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype): n = 10 pttrf = get_lapack_funcs('pttrf', dtype=dtype) d = generate_random_dtype_array((n,), ddtype) + 2 e = generate_random_dtype_array((n-1,), dtype) # test that singular (row of all zeroes) matrix fails via info d[0] = 0 e[0] = 0 _d, _e, info = pttrf(d, e) assert_equal(_d[info - 1], 0, "?pttrf: _d[info-1] is {}, not the illegal value :0." .format(_d[info - 1])) # test with non-spd matrix d = generate_random_dtype_array((n,), ddtype) _d, _e, info = pttrf(d, e) assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't") @pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [ (np.array([4, 10, 29, 25, 5]), np.array([-2, -6, 15, 8]), np.array([4, 9, 25, 16, 1]), np.array([-.5, -.6667, .6, .5]), np.array([[6, 10], [9, 4], [2, 9], [14, 65], [7, 23]]), np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6], [3, -5]]) ), ( np.array([16, 41, 46, 21]), np.array([16 + 16j, 18 - 9j, 1 - 4j]), np.array([16, 9, 1, 4]), np.array([1+1j, 2-1j, 1-4j]), np.array([[64+16j, -16-32j], [93+62j, 61-66j], [78-80j, 71-74j], [14-27j, 35+15j]]), np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j], [1-1j, 2+1j]]) )]) def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect): # test to assure that wrapper is consistent with NAG Manual Mark 26 # example problems: f07jdf and f07jef (real) # examples: f07jrf and f07csf (complex) # NAG examples provide 4 decimals. 
# (Links expire, so please search for "NAG Library Manual Mark 26" online) atol = 1e-4 pttrf = get_lapack_funcs('pttrf', dtype=e[0]) _d, _e, info = pttrf(d, e) assert_allclose(_d, d_expect, atol=atol) assert_allclose(_e, e_expect, atol=atol) pttrs = get_lapack_funcs('pttrs', dtype=e[0]) _x, info = pttrs(_d, _e.conj(), b) assert_allclose(_x, x_expect, atol=atol) # also test option `lower` if e.dtype in COMPLEX_DTYPES: _x, info = pttrs(_d, _e, b, lower=1) assert_allclose(_x, x_expect, atol=atol) def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z): # used by ?pteqr tests to build parameters # returns tuple of (d, e, A, z) if compute_z == 1: # build Hermitian A from Q**T * tri * Q = A by creating Q and tri A_eig = generate_random_dtype_array((n, n), dtype) A_eig = A_eig + np.diag(np.zeros(n) + 4*n) A_eig = (A_eig + A_eig.conj().T) / 2 # obtain right eigenvectors (orthogonal) vr = eigh(A_eig)[1] # create tridiagonal matrix d = generate_random_dtype_array((n,), realtype) + 4 e = generate_random_dtype_array((n-1,), realtype) tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) # Build A using these factors that sytrd would: (Q**T * tri * Q = A) A = vr @ tri @ vr.conj().T # vr is orthogonal z = vr else: # d and e are always real per lapack docs. d = generate_random_dtype_array((n,), realtype) e = generate_random_dtype_array((n-1,), realtype) # make SPD d = d + 4 A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1) return (d, e, A, z) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("compute_z", range(3)) def test_pteqr(dtype, realtype, compute_z): ''' Tests the ?pteqr lapack routine for all dtypes and compute_z parameters. It generates random SPD matrix diagonals d and e, and then confirms correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it tests that z can reform A. ''' seed(42) atol = 1000*np.finfo(dtype).eps pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) n = 10 d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z) assert_equal(info, 0, "info = {}, should be 0.".format(info)) # compare the routine's eigenvalues with scipy.linalg.eig's. 
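    # ?pteqr returns the eigenvalues in descending order while eigh returns
    # them in ascending order, hence both sides are sorted before comparing.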
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol) if compute_z: # verify z_pteqr as orthogonal assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n), atol=atol) # verify that z_pteqr recombines to A assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T, A, atol=atol) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("compute_z", range(3)) def test_pteqr_error_non_spd(dtype, realtype, compute_z): seed(42) pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) n = 10 d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) # test with non-spd matrix d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z) assert info > 0 @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("compute_z", range(3)) def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z): seed(42) pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) n = 10 d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) # test with incorrect/incompatible array sizes assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z) assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z) if compute_z: assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("compute_z", range(3)) def test_pteqr_error_singular(dtype, realtype, compute_z): seed(42) pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) n = 10 d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) # test with singular matrix d[0] = 0 e[0] = 0 d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z) assert info > 0 @pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect", [(2, # "I" np.array([4.16, 5.25, 1.09, .62]), np.array([3.17, -.97, .55]), np.array([8.0023, 1.9926, 1.0014, 0.1237]), np.array([[0.6326, 0.6245, -0.4191, 0.1847], [0.7668, -0.4270, 0.4176, -0.2352], [-0.1082, 0.6071, 0.4594, -0.6393], [-0.0081, 0.2432, 0.6625, 0.7084]])), ]) def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect): ''' Implements real (f08jgf) example from NAG Manual Mark 26. Tests for correct outputs. ''' # the NAG manual has 4 decimals accuracy atol = 1e-4 pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype) z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) _d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z) assert_allclose(_d, d_expect, atol=atol) assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)]) def test_geqrfp(dtype, matrix_size): # Tests for all dytpes, tall, wide, and square matrices. # Using the routine with random matrix A, Q and R are obtained and then # tested such that R is upper triangular and non-negative on the diagonal, # and Q is an orthagonal matrix. Verifies that A=Q@R. It also # tests against a matrix that for which the linalg.qr method returns # negative diagonals, and for error messaging. 
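    # Background for the checks below: ?geqrfp is ?geqrf with the extra
    # guarantee that the diagonal of R is real and non-negative, which makes
    # the QR factorization unique; scipy.linalg.qr gives no such guarantee,
    # so its R may have negative diagonal entries, and the two factorizations
    # then differ only by sign flips of the corresponding columns of Q and
    # rows of R.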
# set test tolerance appropriate for dtype np.random.seed(42) rtol = 250*np.finfo(dtype).eps atol = 100*np.finfo(dtype).eps # get appropriate ?geqrfp for dtype geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype) gqr = get_lapack_funcs(("orgqr"), dtype=dtype) m, n = matrix_size # create random matrix of dimentions m x n A = generate_random_dtype_array((m, n), dtype=dtype) # create qr matrix using geqrfp qr_A, tau, info = geqrfp(A) # obtain r from the upper triangular area r = np.triu(qr_A) # obtain q from the orgqr lapack routine # based on linalg.qr's extraction strategy of q with orgqr if m > n: # this adds an extra column to the end of qr_A # let qqr be an empty m x m matrix qqr = np.zeros((m, m), dtype=dtype) # set first n columns of qqr to qr_A qqr[:, :n] = qr_A # determine q from this qqr # note that m is a sufficient for lwork based on LAPACK documentation q = gqr(qqr, tau=tau, lwork=m)[0] else: q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0] # test that q and r still make A assert_allclose(q@r, A, rtol=rtol) # ensure that q is orthogonal (that q @ transposed q is the identity) assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol, atol=atol) # ensure r is upper tri by comparing original r to r as upper triangular assert_allclose(r, np.triu(r), rtol=rtol) # make sure diagonals of r are positive for this random solution assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r))))) # ensure that info is zero for this success assert_(info == 0) # test that this routine gives r diagonals that are positive for a # matrix that returns negatives in the diagonal with scipy.linalg.rq A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1 r_rq_neg, q_rq_neg = qr(A_negative) rq_A_neg, tau_neg, info_neg = geqrfp(A_negative) # assert that any of the entries on the diagonal from linalg.qr # are negative and that all of geqrfp are positive. 
assert_(np.any(np.diag(r_rq_neg) < 0) and np.all(np.diag(r) > 0)) def test_geqrfp_errors_with_empty_array(): # check that empty array raises good error message A_empty = np.array([]) geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype) assert_raises(Exception, geqrfp, A_empty) @pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx']) @pytest.mark.parametrize("pfx", ['sy', 'he']) def test_standard_eigh_lworks(pfx, driver): n = 1200 # Some sufficiently big arbitrary number dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0]) dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1]) try: _compute_lwork(sc_dlw, n, lower=1) _compute_lwork(dz_dlw, n, lower=1) except Exception as e: pytest.fail("{}_lwork raised unexpected exception: {}" "".format(pfx+driver, e)) @pytest.mark.parametrize("driver", ['gv', 'gvx']) @pytest.mark.parametrize("pfx", ['sy', 'he']) def test_generalized_eigh_lworks(pfx, driver): n = 1200 # Some sufficiently big arbitrary number dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0]) dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1]) # Shouldn't raise any exceptions try: _compute_lwork(sc_dlw, n, uplo="L") _compute_lwork(dz_dlw, n, uplo="L") except Exception as e: pytest.fail("{}_lwork raised unexpected exception: {}" "".format(pfx+driver, e)) @pytest.mark.parametrize("dtype_", DTYPES) @pytest.mark.parametrize("m", [1, 10, 100, 1000]) def test_orcsd_uncsd_lwork(dtype_, m): seed(1234) p = randint(0, m) q = m - p pfx = 'or' if dtype_ in REAL_DTYPES else 'un' dlw = pfx + 'csd_lwork' lw = get_lapack_funcs(dlw, dtype=dtype_) lwval = _compute_lwork(lw, m, p, q) lwval = lwval if pfx == 'un' else (lwval,) assert all([x > 0 for x in lwval]) @pytest.mark.parametrize("dtype_", DTYPES) def test_orcsd_uncsd(dtype_): m, p, q = 250, 80, 170 pfx = 'or' if dtype_ in REAL_DTYPES else 'un' X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m) drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_) lwval = _compute_lwork(dlw, m, p, q) lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork', 'lrwork'], lwval)) cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\ drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals) assert info == 0 U = block_diag(u1, u2) VH = block_diag(v1t, v2t) r = min(min(p, q), min(m-p, m-q)) n11 = min(p, q) - r n12 = min(p, m-q) - r n21 = min(m-p, q) - r n22 = min(m-p, m-q) - r S = np.zeros((m, m), dtype=dtype_) one = dtype_(1.) for i in range(n11): S[i, i] = one for i in range(n22): S[p+i, q+i] = one for i in range(n12): S[i+n11+r, i+n11+r+n21+n22+r] = -one for i in range(n21): S[p+n22+r+i, n11+r+i] = one for i in range(r): S[i+n11, i+n11] = np.cos(theta[i]) S[p+n22+i, i+r+n21+n22] = np.cos(theta[i]) S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i]) S[p+n22+i, i+n11] = np.sin(theta[i]) Xc = U @ S @ VH assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("trans_bool", [False, True]) @pytest.mark.parametrize("fact", ["F", "N"]) def test_gtsvx(dtype, trans_bool, fact): """ These tests uses ?gtsvx to solve a random Ax=b system for each dtype. It tests that the outputs define an LU matrix, that inputs are unmodified, transposal options, incompatible shapes, singular matrices, and singular factorizations. It parametrizes DTYPES and the 'fact' value along with the fact related inputs. 
""" seed(42) # set test tolerance appropriate for dtype atol = 100 * np.finfo(dtype).eps # obtain routine gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) # Generate random tridiagonal matrix A n = 10 dl = generate_random_dtype_array((n-1,), dtype=dtype) d = generate_random_dtype_array((n,), dtype=dtype) du = generate_random_dtype_array((n-1,), dtype=dtype) A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) # generate random solution x x = generate_random_dtype_array((n, 2), dtype=dtype) # create b from x for equation Ax=b trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N" b = (A.conj().T if trans_bool else A) @ x # store a copy of the inputs to check they haven't been modified later inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()] # set these to None if fact = 'N', or the output of gttrf is fact = 'F' dlf_, df_, duf_, du2f_, ipiv_, info_ = \ gttrf(dl, d, du) if fact == 'F' else [None]*6 gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out assert_(info == 0, "?gtsvx info = {}, should be zero".format(info)) # assure that inputs are unmodified assert_array_equal(dl, inputs_cpy[0]) assert_array_equal(d, inputs_cpy[1]) assert_array_equal(du, inputs_cpy[2]) assert_array_equal(b, inputs_cpy[3]) # test that x_soln matches the expected x assert_allclose(x, x_soln, atol=atol) # assert that the outputs are of correct type or shape # rcond should be a scalar assert_(hasattr(rcond, "__len__") is not True, "rcond should be scalar but is {}".format(rcond)) # ferr should be length of # of cols in x assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but shoud be {}," .format(ferr.shape[0], b.shape[1])) # berr should be length of # of cols in x assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but shoud be {}," .format(berr.shape[0], b.shape[1])) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("trans_bool", [0, 1]) @pytest.mark.parametrize("fact", ["F", "N"]) def test_gtsvx_error_singular(dtype, trans_bool, fact): seed(42) # obtain routine gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) # Generate random tridiagonal matrix A n = 10 dl = generate_random_dtype_array((n-1,), dtype=dtype) d = generate_random_dtype_array((n,), dtype=dtype) du = generate_random_dtype_array((n-1,), dtype=dtype) A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) # generate random solution x x = generate_random_dtype_array((n, 2), dtype=dtype) # create b from x for equation Ax=b trans = "T" if dtype in REAL_DTYPES else "C" b = (A.conj().T if trans_bool else A) @ x # set these to None if fact = 'N', or the output of gttrf is fact = 'F' dlf_, df_, duf_, du2f_, ipiv_, info_ = \ gttrf(dl, d, du) if fact == 'F' else [None]*6 gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out # test with singular matrix # no need to test inputs with fact "F" since ?gttrf already does. if fact == "N": # Construct a singular example manually d[-1] = 0 dl[-1] = 0 # solve using routine gtsvx_out = gtsvx(dl, d, du, b) dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out # test for the singular matrix. 
assert info > 0, "info should be > 0 for singular matrix" elif fact == 'F': # assuming that a singular factorization is input df_[-1] = 0 duf_[-1] = 0 du2f_[-1] = 0 gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out # info should not be zero and should provide index of illegal value assert info > 0, "info should be > 0 for singular matrix" @pytest.mark.parametrize("dtype", DTYPES*2) @pytest.mark.parametrize("trans_bool", [False, True]) @pytest.mark.parametrize("fact", ["F", "N"]) def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact): seed(42) # obtain routine gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) # Generate random tridiagonal matrix A n = 10 dl = generate_random_dtype_array((n-1,), dtype=dtype) d = generate_random_dtype_array((n,), dtype=dtype) du = generate_random_dtype_array((n-1,), dtype=dtype) A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) # generate random solution x x = generate_random_dtype_array((n, 2), dtype=dtype) # create b from x for equation Ax=b trans = "T" if dtype in REAL_DTYPES else "C" b = (A.conj().T if trans_bool else A) @ x # set these to None if fact = 'N', or the output of gttrf is fact = 'F' dlf_, df_, duf_, du2f_, ipiv_, info_ = \ gttrf(dl, d, du) if fact == 'F' else [None]*6 if fact == "N": assert_raises(ValueError, gtsvx, dl[:-1], d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) assert_raises(ValueError, gtsvx, dl, d[:-1], du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) assert_raises(ValueError, gtsvx, dl, d, du[:-1], b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) assert_raises(Exception, gtsvx, dl, d, du, b[:-1], fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) else: assert_raises(ValueError, gtsvx, dl, d, du, b, fact=fact, trans=trans, dlf=dlf_[:-1], df=df_, duf=duf_, du2=du2f_, ipiv=ipiv_) assert_raises(ValueError, gtsvx, dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_[:-1], duf=duf_, du2=du2f_, ipiv=ipiv_) assert_raises(ValueError, gtsvx, dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_[:-1], du2=du2f_, ipiv=ipiv_) assert_raises(ValueError, gtsvx, dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, duf=duf_, du2=du2f_[:-1], ipiv=ipiv_) @pytest.mark.parametrize("du,d,dl,b,x", [(np.array([2.1, -1.0, 1.9, 8.0]), np.array([3.0, 2.3, -5.0, -0.9, 7.1]), np.array([3.4, 3.6, 7.0, -6.0]), np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2], [.6, -11.2], [2.7, 19.1]]), np.array([[-4, 5], [7, -4], [3, -3], [-4, -2], [-3, 1]])), (np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]), np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j, -.3 + 4.3j, -3.3 + 1.3j]), np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]), np.array([[2.4 - 5j, 2.7 + 6.9j], [3.4 + 18.2j, -6.9 - 5.3j], [-14.7 + 9.7j, -6 - .6j], [31.9 - 7.7j, -3.9 + 9.3j], [-1 + 1.6j, -3 + 12.2j]]), np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j], [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j], [1 - 1j, 2 - 2j]]))]) def test_gtsvx_NAG(du, d, dl, b, x): # Test to ensure wrapper is consistent with NAG Manual Mark 26 # example problems: real (f07cbf) and complex (f07cpf) gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype) gtsvx_out = gtsvx(dl, d, du, b) dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out assert_array_almost_equal(x, x_soln) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) 
@pytest.mark.parametrize("fact,df_de_lambda", [("F", lambda d, e:get_lapack_funcs('pttrf', dtype=e.dtype)(d, e)), ("N", lambda d, e: (None, None, None))]) def test_ptsvx(dtype, realtype, fact, df_de_lambda): ''' This tests the ?ptsvx lapack routine wrapper to solve a random system Ax = b for all dtypes and input variations. Tests for: unmodified input parameters, fact options, incompatible matrix shapes raise an error, and singular matrices return info of illegal value. ''' seed(42) # set test tolerance appropriate for dtype atol = 100 * np.finfo(dtype).eps ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) n = 5 # create diagonals according to size and dtype d = generate_random_dtype_array((n,), realtype) + 4 e = generate_random_dtype_array((n-1,), dtype) A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) x_soln = generate_random_dtype_array((n, 2), dtype=dtype) b = A @ x_soln # use lambda to determine what df, ef are df, ef, info = df_de_lambda(d, e) # create copy to later test that they are unmodified diag_cpy = [d.copy(), e.copy(), b.copy()] # solve using routine df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact, df=df, ef=ef) # d, e, and b should be unmodified assert_array_equal(d, diag_cpy[0]) assert_array_equal(e, diag_cpy[1]) assert_array_equal(b, diag_cpy[2]) assert_(info == 0, "info should be 0 but is {}.".format(info)) assert_array_almost_equal(x_soln, x) # test that the factors from ptsvx can be recombined to make A L = np.diag(ef, -1) + np.diag(np.ones(n)) D = np.diag(df) assert_allclose(A, L@D@(np.conj(L).T), atol=atol) # assert that the outputs are of correct type or shape # rcond should be a scalar assert not hasattr(rcond, "__len__"), \ "rcond should be scalar but is {}".format(rcond) # ferr should be length of # of cols in x assert_(ferr.shape == (2,), "ferr.shape is {} but shoud be ({},)" .format(ferr.shape, x_soln.shape[1])) # berr should be length of # of cols in x assert_(berr.shape == (2,), "berr.shape is {} but shoud be ({},)" .format(berr.shape, x_soln.shape[1])) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("fact,df_de_lambda", [("F", lambda d, e:get_lapack_funcs('pttrf', dtype=e.dtype)(d, e)), ("N", lambda d, e: (None, None, None))]) def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda): seed(42) ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) n = 5 # create diagonals according to size and dtype d = generate_random_dtype_array((n,), realtype) + 4 e = generate_random_dtype_array((n-1,), dtype) A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) x_soln = generate_random_dtype_array((n, 2), dtype=dtype) b = A @ x_soln # use lambda to determine what df, ef are df, ef, info = df_de_lambda(d, e) # test with malformatted array sizes assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef) assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef) assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef) @pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) @pytest.mark.parametrize("fact,df_de_lambda", [("F", lambda d, e:get_lapack_funcs('pttrf', dtype=e.dtype)(d, e)), ("N", lambda d, e: (None, None, None))]) def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda): seed(42) ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) n = 5 # create diagonals according to size and dtype d = generate_random_dtype_array((n,), realtype) + 4 e = generate_random_dtype_array((n-1,), dtype) A = np.diag(d) + 
np.diag(e, -1) + np.diag(np.conj(e), 1) x_soln = generate_random_dtype_array((n, 2), dtype=dtype) b = A @ x_soln # use lambda to determine what df, ef are df, ef, info = df_de_lambda(d, e) if fact == "N": d[3] = 0 # obtain new df, ef df, ef, info = df_de_lambda(d, e) # solve using routine df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b) # test for the singular matrix. assert info > 0 and info <= n # non SPD matrix d = generate_random_dtype_array((n,), realtype) df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b) assert info > 0 and info <= n else: # assuming that someone is using a singular factorization df, ef, info = df_de_lambda(d, e) df[0] = 0 ef[0] = 0 df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact, df=df, ef=ef) assert info > 0 @pytest.mark.parametrize('d,e,b,x', [(np.array([4, 10, 29, 25, 5]), np.array([-2, -6, 15, 8]), np.array([[6, 10], [9, 4], [2, 9], [14, 65], [7, 23]]), np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6], [3, -5]])), (np.array([16, 41, 46, 21]), np.array([16 + 16j, 18 - 9j, 1 - 4j]), np.array([[64 + 16j, -16 - 32j], [93 + 62j, 61 - 66j], [78 - 80j, 71 - 74j], [14 - 27j, 35 + 15j]]), np.array([[2 + 1j, -3 - 2j], [1 + 1j, 1 + 1j], [1 - 2j, 1 - 2j], [1 - 1j, 2 + 1j]]))]) def test_ptsvx_NAG(d, e, b, x): # test to assure that wrapper is consistent with NAG Manual Mark 26 # example problems: f07jbf, f07jpf # (Links expire, so please search for "NAG Library Manual Mark 26" online) # obtain routine with correct type based on e.dtype ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype) # solve using routine df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b) # check that ptsvx's solution and x are the same. assert_array_almost_equal(x, x_ptsvx) @pytest.mark.parametrize('lower', [False, True]) @pytest.mark.parametrize('dtype', DTYPES) def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower): seed(1234) atol = np.finfo(dtype).eps*100 # Manual conversion to/from packed format is feasible here. n, nrhs = 10, 4 a = generate_random_dtype_array([n, n], dtype=dtype) b = generate_random_dtype_array([n, nrhs], dtype=dtype) a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
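    # symmetrize a and shift its diagonal so that it is (Hermitian) positive definite, as required by the packed Cholesky-based routines exercised below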
if lower: inds = ([x for y in range(n) for x in range(y, n)], [y for y in range(n) for x in range(y, n)]) else: inds = ([x for y in range(1, n+1) for x in range(y)], [y-1 for y in range(1, n+1) for x in range(y)]) ap = a[inds] ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs( ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'), dtype=dtype, ilp64="preferred") ul, info = pptrf(n, ap, lower=lower) assert_equal(info, 0) aul = cholesky(a, lower=lower)[inds] assert_allclose(ul, aul, rtol=0, atol=atol) uli, info = pptri(n, ul, lower=lower) assert_equal(info, 0) auli = inv(a)[inds] assert_allclose(uli, auli, rtol=0, atol=atol) x, info = pptrs(n, ul, b, lower=lower) assert_equal(info, 0) bx = solve(a, b) assert_allclose(x, bx, rtol=0, atol=atol) xv, info = ppsv(n, ap, b, lower=lower) assert_equal(info, 0) assert_allclose(xv, bx, rtol=0, atol=atol) anorm = np.linalg.norm(a, 1) rcond, info = ppcon(n, ap, anorm=anorm, lower=lower) assert_equal(info, 0) assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1) @pytest.mark.parametrize('dtype', DTYPES) def test_gges_tgexc(dtype): seed(1234) atol = np.finfo(dtype).eps*100 n = 10 a = generate_random_dtype_array([n, n], dtype=dtype) b = generate_random_dtype_array([n, n], dtype=dtype) gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype) result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False) assert_equal(result[-1], 0) s = result[0] t = result[1] q = result[-4] z = result[-3] d1 = s[0, 0] / t[0, 0] d2 = s[6, 6] / t[6, 6] if dtype in COMPLEX_DTYPES: assert_allclose(s, np.triu(s), rtol=0, atol=atol) assert_allclose(t, np.triu(t), rtol=0, atol=atol) assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) result = tgexc(s, t, q, z, 6, 0) assert_equal(result[-1], 0) s = result[0] t = result[1] q = result[2] z = result[3] if dtype in COMPLEX_DTYPES: assert_allclose(s, np.triu(s), rtol=0, atol=atol) assert_allclose(t, np.triu(t), rtol=0, atol=atol) assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol) assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
WarrenWeckesser/scipy
scipy/linalg/tests/test_lapack.py
Python
bsd-3-clause
116,267
''' Some tests for the documenting decorator and support functions ''' from __future__ import division, print_function, absolute_import import sys import pytest from numpy.testing import assert_equal from scipy.misc import doccer # python -OO strips docstrings DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 docstring = \ """Docstring %(strtest1)s %(strtest2)s %(strtest3)s """ param_doc1 = \ """Another test with some indent""" param_doc2 = \ """Another test, one line""" param_doc3 = \ """ Another test with some indent""" doc_dict = {'strtest1':param_doc1, 'strtest2':param_doc2, 'strtest3':param_doc3} filled_docstring = \ """Docstring Another test with some indent Another test, one line Another test with some indent """ def test_unindent(): assert_equal(doccer.unindent_string(param_doc1), param_doc1) assert_equal(doccer.unindent_string(param_doc2), param_doc2) assert_equal(doccer.unindent_string(param_doc3), param_doc1) def test_unindent_dict(): d2 = doccer.unindent_dict(doc_dict) assert_equal(d2['strtest1'], doc_dict['strtest1']) assert_equal(d2['strtest2'], doc_dict['strtest2']) assert_equal(d2['strtest3'], doc_dict['strtest1']) def test_docformat(): udd = doccer.unindent_dict(doc_dict) formatted = doccer.docformat(docstring, udd) assert_equal(formatted, filled_docstring) single_doc = 'Single line doc %(strtest1)s' formatted = doccer.docformat(single_doc, doc_dict) # Note - initial indent of format string does not # affect subsequent indent of inserted parameter assert_equal(formatted, """Single line doc Another test with some indent""") @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") def test_decorator(): # with unindentation of parameters decorator = doccer.filldoc(doc_dict, True) @decorator def func(): """ Docstring %(strtest3)s """ assert_equal(func.__doc__, """ Docstring Another test with some indent """) # without unindentation of parameters decorator = doccer.filldoc(doc_dict, False) @decorator def func(): """ Docstring %(strtest3)s """ assert_equal(func.__doc__, """ Docstring Another test with some indent """) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") def test_inherit_docstring_from(): class Foo(object): def func(self): '''Do something useful.''' return def func2(self): '''Something else.''' class Bar(Foo): @doccer.inherit_docstring_from(Foo) def func(self): '''%(super)sABC''' return @doccer.inherit_docstring_from(Foo) def func2(self): # No docstring. return assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC') assert_equal(Bar.func2.__doc__, Foo.func2.__doc__) bar = Bar() assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC') assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/misc/tests/test_doccer.py
Python
mit
3,171
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2017 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: manageiq_alerts short_description: Configuration of alerts in ManageIQ extends_documentation_fragment: manageiq version_added: '2.5' author: Elad Alfassa (@elad661) <[email protected]> description: - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ. options: state: description: - absent - alert should not exist, - present - alert should exist, required: False choices: ['absent', 'present'] default: 'present' description: description: - The unique alert description in ManageIQ. - Required when state is "absent" or "present". resource_type: description: - The entity type for the alert in ManageIQ. Required when state is "present". choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] expression_type: description: - Expression type. default: hash choices: ["hash", "miq"] expression: description: - The alert expression for ManageIQ. - Can either be in the "Miq Expression" format or the "Hash Expression" format. - Required if state is "present". enabled: description: - Enable or disable the alert. Required if state is "present". type: bool options: description: - Additional alert options, such as notification type and frequency ''' EXAMPLES = ''' - name: Add an alert with a "hash expression" to ManageIQ manageiq_alerts: state: present description: Test Alert 01 options: notifications: email: to: ["[email protected]"] from: "[email protected]" resource_type: ContainerNode expression: eval_method: hostd_log_threshold mode: internal options: {} enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' validate_certs: False - name: Add an alert with a "miq expression" to ManageIQ manageiq_alerts: state: present description: Test Alert 02 options: notifications: email: to: ["[email protected]"] from: "[email protected]" resource_type: Vm expression_type: miq expression: and: - CONTAINS: tag: Vm.managed-environment value: prod - not: CONTAINS: tag: Vm.host.managed-environment value: prod enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' validate_certs: False - name: Delete an alert from ManageIQ manageiq_alerts: state: absent description: Test Alert 01 manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' validate_certs: False ''' RETURN = ''' ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec class ManageIQAlert(object): """ Represent a ManageIQ alert. Can be initialized with both the format we receive from the server and the format we get from the user.
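Two instances are considered equal when all of their attributes match; update_alert() below relies on this to decide whether the server-side alert already matches the requested definition.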
""" def __init__(self, alert): self.description = alert['description'] self.db = alert['db'] self.enabled = alert['enabled'] self.options = alert['options'] self.hash_expression = None self.miq_expressipn = None if 'hash_expression' in alert: self.hash_expression = alert['hash_expression'] if 'miq_expression' in alert: self.miq_expression = alert['miq_expression'] if 'exp' in self.miq_expression: # miq_expression is a field that needs a special case, because # it's returned surrounded by a dict named exp even though we don't # send it with that dict. self.miq_expression = self.miq_expression['exp'] def __eq__(self, other): """ Compare two ManageIQAlert objects """ return self.__dict__ == other.__dict__ class ManageIQAlerts(object): """ Object to execute alert management operations in manageiq. """ def __init__(self, manageiq): self.manageiq = manageiq self.module = self.manageiq.module self.api_url = self.manageiq.api_url self.client = self.manageiq.client self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url) def get_alerts(self): """ Get all alerts from ManageIQ """ try: response = self.client.get(self.alerts_url + '?expand=resources') except Exception as e: self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e)) return response.get('resources', []) def validate_hash_expression(self, expression): """ Validate a 'hash expression' alert definition """ # hash expressions must have the following fields for key in ['options', 'eval_method', 'mode']: if key not in expression: msg = "Hash expression is missing required field {key}".format(key=key) self.module.fail_json(msg) def create_alert_dict(self, params): """ Create a dict representing an alert """ if params['expression_type'] == 'hash': # hash expression supports depends on https://github.com/ManageIQ/manageiq-api/pull/76 self.validate_hash_expression(params['expression']) expression_type = 'hash_expression' else: # actually miq_expression, but we call it "expression" for backwards-compatibility expression_type = 'expression' # build the alret alert = dict(description=params['description'], db=params['resource_type'], options=params['options'], enabled=params['enabled']) # add the actual expression. 
alert.update({expression_type: params['expression']}) return alert def add_alert(self, alert): """ Add a new alert to ManageIQ """ try: result = self.client.post(self.alerts_url, action='create', resource=alert) msg = "Alert {description} created successfully: {details}" msg = msg.format(description=alert['description'], details=result) return dict(changed=True, msg=msg) except Exception as e: msg = "Creating alert {description} failed: {error}" if "Resource expression needs be specified" in str(e): # Running on an older version of ManageIQ and trying to create a hash expression msg = msg.format(description=alert['description'], error="Your version of ManageIQ does not support hash_expression") else: msg = msg.format(description=alert['description'], error=e) self.module.fail_json(msg=msg) def delete_alert(self, alert): """ Delete an alert """ try: result = self.client.post('{url}/{id}'.format(url=self.alerts_url, id=alert['id']), action="delete") msg = "Alert {description} deleted: {details}" msg = msg.format(description=alert['description'], details=result) return dict(changed=True, msg=msg) except Exception as e: msg = "Deleting alert {description} failed: {error}" msg = msg.format(description=alert['description'], error=e) self.module.fail_json(msg=msg) def update_alert(self, existing_alert, new_alert): """ Update an existing alert with the values from `new_alert` """ new_alert_obj = ManageIQAlert(new_alert) if new_alert_obj == ManageIQAlert(existing_alert): # no change needed - alerts are identical return dict(changed=False, msg="No update needed") else: try: url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id']) result = self.client.post(url, action="edit", resource=new_alert) # make sure that the update was indeed successful by comparing # the result to the expected result. if new_alert_obj == ManageIQAlert(result): # success! 
msg = "Alert {description} upated successfully: {details}" msg = msg.format(description=existing_alert['description'], details=result) return dict(changed=True, msg=msg) else: # unexpected result msg = "Updating alert {description} failed, unexpected result {details}" msg = msg.format(description=existing_alert['description'], details=result) self.module.fail_json(msg=msg) except Exception as e: msg = "Updating alert {description} failed: {error}" if "Resource expression needs be specified" in str(e): # Running on an older version of ManageIQ and trying to update a hash expression msg = msg.format(description=existing_alert['description'], error="Your version of ManageIQ does not support hash_expression") else: msg = msg.format(description=existing_alert['description'], error=e) self.module.fail_json(msg=msg) def main(): argument_spec = dict( description=dict(type='str'), resource_type=dict(type='str', choices=['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']), expression_type=dict(type='str', default='hash', choices=['miq', 'hash']), expression=dict(type='dict'), options=dict(type='dict'), enabled=dict(type='bool'), state=dict(require=False, default='present', choices=['present', 'absent']), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) module = AnsibleModule(argument_spec=argument_spec, required_if=[('state', 'present', ['description', 'resource_type', 'expression', 'enabled', 'options']), ('state', 'absent', ['description'])]) state = module.params['state'] description = module.params['description'] manageiq = ManageIQ(module) manageiq_alerts = ManageIQAlerts(manageiq) existing_alert = manageiq.find_collection_resource_by("alert_definitions", description=description) # we need to add or update the alert if state == "present": alert = manageiq_alerts.create_alert_dict(module.params) if not existing_alert: # an alert with this description doesn't exist yet, let's create it res_args = manageiq_alerts.add_alert(alert) else: # an alert with this description exists, we might need to update it res_args = manageiq_alerts.update_alert(existing_alert, alert) # this alert should not exist elif state == "absent": # if we have an alert with this description, delete it if existing_alert: res_args = manageiq_alerts.delete_alert(existing_alert) else: # it doesn't exist, and that's okay msg = "Alert '{description}' does not exist in ManageIQ" msg = msg.format(description=description) res_args = dict(changed=False, msg=msg) module.exit_json(**res_args) if __name__ == "__main__": main()
alxgu/ansible
lib/ansible/modules/remote_management/manageiq/manageiq_alerts.py
Python
gpl-3.0
12,976
class A(B): pass class A(object): pass class A(x.y()): pass class A(B, C): pass
warner83/micropython
tests/bytecode/mp-tests/class5.py
Python
mit
96
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('compute', 'server_usage') class ServerUsageController(wsgi.Controller): def __init__(self, *args, **kwargs): super(ServerUsageController, self).__init__(*args, **kwargs) self.compute_api = compute.API() def _extend_server(self, server, instance): for k in ['launched_at', 'terminated_at']: key = "%s:%s" % (Server_usage.alias, k) # NOTE(danms): Historically, this timestamp has been generated # merely by grabbing str(datetime) of a TZ-naive object. The # only way we can keep that with instance objects is to strip # the tzinfo from the stamp and str() it. server[key] = (instance[k].replace(tzinfo=None) if instance[k] else None) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=ServerUsageTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) # server['id'] is guaranteed to be in the cache due to # the core API adding it in its 'show' method. self._extend_server(server, db_instance) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): # Attach our slave template to the response object resp_obj.attach(xml=ServerUsagesTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: db_instance = req.get_db_instance(server['id']) # server['id'] is guaranteed to be in the cache due to # the core API adding it in its 'detail' method. self._extend_server(server, db_instance) class Server_usage(extensions.ExtensionDescriptor): """Adds launched_at and terminated_at on Servers.""" name = "ServerUsage" alias = "OS-SRV-USG" namespace = ("http://docs.openstack.org/compute/ext/" "server_usage/api/v1.1") updated = "2013-04-29T00:00:00Z" def get_controller_extensions(self): controller = ServerUsageController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def make_server(elem): elem.set('{%s}launched_at' % Server_usage.namespace, '%s:launched_at' % Server_usage.alias) elem.set('{%s}terminated_at' % Server_usage.namespace, '%s:terminated_at' % Server_usage.alias) class ServerUsageTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) return xmlutil.SlaveTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace}) class ServerUsagesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) return xmlutil.SlaveTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace})
ChinaMassClouds/copenstack-server
openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/server_usage.py
Python
gpl-2.0
4,148
import pytest from tests.support.asserts import assert_success """ Tests that WebDriver can transcend site origins. Many modern browsers impose strict cross-origin checks, and WebDriver should be able to transcend these. Although an implementation detail, certain browsers also enforce process isolation based on site origin. This is known to sometimes cause problems for WebDriver implementations. """ @pytest.fixture def frame_doc(inline): return inline("<title>cheese</title><p>frame") @pytest.fixture def one_frame_doc(inline, frame_doc): return inline("<title>bar</title><iframe src='%s'></iframe>" % frame_doc) @pytest.fixture def nested_frames_doc(inline, one_frame_doc): return inline("<title>foo</title><iframe src='%s'></iframe>" % one_frame_doc) def get_title(session): return session.transport.send( "GET", "session/{session_id}/title".format(**vars(session))) def test_no_iframe(session, inline): session.url = inline("<title>Foobar</title><h2>Hello</h2>") result = get_title(session) assert_success(result, "Foobar") def test_iframe(session, one_frame_doc): session.url = one_frame_doc frame = session.find.css("iframe", all=False) session.switch_frame(frame) session.find.css("p", all=False) response = get_title(session) assert_success(response, "bar") def test_nested_iframe(session, nested_frames_doc): session.url = nested_frames_doc outer_frame = session.find.css("iframe", all=False) session.switch_frame(outer_frame) inner_frame = session.find.css("iframe", all=False) session.switch_frame(inner_frame) session.find.css("p", all=False) response = get_title(session) assert_success(response, "foo") @pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"]) def test_origin(session, inline, iframe, domain): session.url = inline("<title>foo</title>{}".format( iframe("<title>bar</title><p>frame", domain=domain))) frame = session.find.css("iframe", all=False) session.switch_frame(frame) session.find.css("p", all=False) response = get_title(session) assert_success(response, "foo")
scheib/chromium
third_party/blink/web_tests/external/wpt/webdriver/tests/get_title/iframe.py
Python
bsd-3-clause
2,183
#!/usr/bin/python # Copyright 2016 Google Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcpubsub_info version_added: "2.3" short_description: List Topics/Subscriptions and Messages from Google PubSub. description: - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for topic/subscription management. See U(https://cloud.google.com/pubsub/docs) for an overview. - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change. requirements: - "python >= 2.6" - "google-auth >= 0.5.0" - "google-cloud-pubsub >= 0.22.0" notes: - list state enables user to list topics or subscriptions in the project. See examples for details. author: - "Tom Melendez (@supertom) <[email protected]>" options: topic: description: - GCP pubsub topic name. Only the name, not the full path, is required. required: False view: description: - Choices are 'topics' or 'subscriptions' required: True state: description: - list is the only valid option. required: False ''' EXAMPLES = ''' ## List all Topics in a project - gcpubsub_info: view: topics state: list ## List all Subscriptions in a project - gcpubsub_info: view: subscriptions state: list ## List all Subscriptions for a Topic in a project - gcpubsub_info: view: subscriptions topic: my-topic state: list ''' RETURN = ''' subscriptions: description: List of subscriptions. returned: When view is set to subscriptions. type: list sample: ["mysubscription", "mysubscription2"] topic: description: Name of topic. Used to filter subscriptions. returned: Always type: str sample: "mytopic" topics: description: List of topics. returned: When view is set to topics. 
type: list sample: ["mytopic", "mytopic2"] ''' try: from ast import literal_eval HAS_PYTHON26 = True except ImportError: HAS_PYTHON26 = False try: from google.cloud import pubsub HAS_GOOGLE_CLOUD_PUBSUB = True except ImportError as e: HAS_GOOGLE_CLOUD_PUBSUB = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials def list_func(data, member='name'): """Used for state=list.""" return [getattr(x, member) for x in data] def main(): module = AnsibleModule(argument_spec=dict( view=dict(choices=['topics', 'subscriptions'], default='topics'), topic=dict(required=False), state=dict(choices=['list'], default='list'), service_account_email=dict(), credentials_file=dict(), project_id=dict(), ),) if module._name == 'gcpubsub_facts': module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'", version='2.13') if not HAS_PYTHON26: module.fail_json( msg="GCE module requires python's 'ast' module, python v2.6+") if not HAS_GOOGLE_CLOUD_PUBSUB: module.fail_json(msg="Please install google-cloud-pubsub library.") CLIENT_MINIMUM_VERSION = '0.22.0' if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION): module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION) mod_params = {} mod_params['state'] = module.params.get('state') mod_params['topic'] = module.params.get('topic') mod_params['view'] = module.params.get('view') creds, params = get_google_cloud_credentials(module) pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False) pubsub_client.user_agent = 'ansible-pubsub-0.1' json_output = {} if mod_params['view'] == 'topics': json_output['topics'] = list_func(pubsub_client.list_topics()) elif mod_params['view'] == 'subscriptions': if mod_params['topic']: t = pubsub_client.topic(mod_params['topic']) json_output['subscriptions'] = list_func(t.list_subscriptions()) else: json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions()) json_output['changed'] = False json_output.update(mod_params) module.exit_json(**json_output) if __name__ == '__main__': main()
anryko/ansible
lib/ansible/modules/cloud/google/gcpubsub_info.py
Python
gpl-3.0
4,597
import datetime from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import ( display_for_field, display_for_value, label_for_field, lookup_field, ) from django.contrib.admin.views.main import ( ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR, ) from django.core.exceptions import ObjectDoesNotExist from django.db import models from django.template import Library from django.template.loader import get_template from django.templatetags.static import static from django.urls import NoReverseMatch from django.utils import formats from django.utils.html import format_html from django.utils.safestring import mark_safe from django.utils.text import capfirst from django.utils.translation import gettext as _ from .base import InclusionAdminNode register = Library() DOT = '.' @register.simple_tag def paginator_number(cl, i): """ Generate an individual page index link in a paginated list. """ if i == DOT: return '… ' elif i == cl.page_num: return format_html('<span class="this-page">{}</span> ', i + 1) else: return format_html( '<a href="{}"{}>{}</a> ', cl.get_query_string({PAGE_VAR: i}), mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''), i + 1, ) def pagination(cl): """ Generate the series of links to the pages in a paginated list. """ paginator, page_num = cl.paginator, cl.page_num pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page if not pagination_required: page_range = [] else: ON_EACH_SIDE = 3 ON_ENDS = 2 # If there are 10 or fewer pages, display links to every page. # Otherwise, do some fancy if paginator.num_pages <= 10: page_range = range(paginator.num_pages) else: # Insert "smart" pagination links, so that there are always ON_ENDS # links at either end of the list of pages, and there are always # ON_EACH_SIDE links at either end of the "current page" link. page_range = [] if page_num > (ON_EACH_SIDE + ON_ENDS): page_range += [ *range(0, ON_ENDS), DOT, *range(page_num - ON_EACH_SIDE, page_num + 1), ] else: page_range.extend(range(0, page_num + 1)) if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1): page_range += [ *range(page_num + 1, page_num + ON_EACH_SIDE + 1), DOT, *range(paginator.num_pages - ON_ENDS, paginator.num_pages) ] else: page_range.extend(range(page_num + 1, paginator.num_pages)) need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page return { 'cl': cl, 'pagination_required': pagination_required, 'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}), 'page_range': page_range, 'ALL_VAR': ALL_VAR, '1': 1, } @register.tag(name='pagination') def pagination_tag(parser, token): return InclusionAdminNode( parser, token, func=pagination, template_name='pagination.html', takes_context=False, ) def result_headers(cl): """ Generate the list column headers. 
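Each header is yielded as a dict giving the column text, whether and how it is currently sorted, and the query-string URLs for promoting, toggling or removing that ordering.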
""" ordering_field_columns = cl.get_ordering_field_columns() for i, field_name in enumerate(cl.list_display): text, attr = label_for_field( field_name, cl.model, model_admin=cl.model_admin, return_attr=True ) is_field_sortable = cl.sortable_by is None or field_name in cl.sortable_by if attr: field_name = _coerce_field_name(field_name, i) # Potentially not sortable # if the field is the action checkbox: no sorting and special class if field_name == 'action_checkbox': yield { "text": text, "class_attrib": mark_safe(' class="action-checkbox-column"'), "sortable": False, } continue admin_order_field = getattr(attr, "admin_order_field", None) # Set ordering for attr that is a property, if defined. if isinstance(attr, property) and hasattr(attr, 'fget'): admin_order_field = getattr(attr.fget, 'admin_order_field', None) if not admin_order_field: is_field_sortable = False if not is_field_sortable: # Not sortable yield { 'text': text, 'class_attrib': format_html(' class="column-{}"', field_name), 'sortable': False, } continue # OK, it is sortable if we got this far th_classes = ['sortable', 'column-{}'.format(field_name)] order_type = '' new_order_type = 'asc' sort_priority = 0 # Is it currently being sorted on? is_sorted = i in ordering_field_columns if is_sorted: order_type = ordering_field_columns.get(i).lower() sort_priority = list(ordering_field_columns).index(i) + 1 th_classes.append('sorted %sending' % order_type) new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type] # build new ordering param o_list_primary = [] # URL for making this field the primary sort o_list_remove = [] # URL for removing this field from sort o_list_toggle = [] # URL for toggling order type for this field def make_qs_param(t, n): return ('-' if t == 'desc' else '') + str(n) for j, ot in ordering_field_columns.items(): if j == i: # Same column param = make_qs_param(new_order_type, j) # We want clicking on this header to bring the ordering to the # front o_list_primary.insert(0, param) o_list_toggle.append(param) # o_list_remove - omit else: param = make_qs_param(ot, j) o_list_primary.append(param) o_list_toggle.append(param) o_list_remove.append(param) if i not in ordering_field_columns: o_list_primary.insert(0, make_qs_param(new_order_type, i)) yield { "text": text, "sortable": True, "sorted": is_sorted, "ascending": order_type == "asc", "sort_priority": sort_priority, "url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}), "url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}), "url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}), "class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '', } def _boolean_icon(field_val): icon_url = static('admin/img/icon-%s.svg' % {True: 'yes', False: 'no', None: 'unknown'}[field_val]) return format_html('<img src="{}" alt="{}">', icon_url, field_val) def _coerce_field_name(field_name, field_index): """ Coerce a field_name (which may be a callable) to a string. """ if callable(field_name): if field_name.__name__ == '<lambda>': return 'lambda' + str(field_index) else: return field_name.__name__ return field_name def items_for_result(cl, result, form): """ Generate the actual list of data. 
""" def link_in_col(is_first, field_name, cl): if cl.list_display_links is None: return False if is_first and not cl.list_display_links: return True return field_name in cl.list_display_links first = True pk = cl.lookup_opts.pk.attname for field_index, field_name in enumerate(cl.list_display): empty_value_display = cl.model_admin.get_empty_value_display() row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)] try: f, attr, value = lookup_field(field_name, result, cl.model_admin) except ObjectDoesNotExist: result_repr = empty_value_display else: empty_value_display = getattr(attr, 'empty_value_display', empty_value_display) if f is None or f.auto_created: if field_name == 'action_checkbox': row_classes = ['action-checkbox'] boolean = getattr(attr, 'boolean', False) result_repr = display_for_value(value, empty_value_display, boolean) if isinstance(value, (datetime.date, datetime.time)): row_classes.append('nowrap') else: if isinstance(f.remote_field, models.ManyToOneRel): field_val = getattr(result, f.name) if field_val is None: result_repr = empty_value_display else: result_repr = field_val else: result_repr = display_for_field(value, f, empty_value_display) if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)): row_classes.append('nowrap') if str(result_repr) == '': result_repr = mark_safe('&nbsp;') row_class = mark_safe(' class="%s"' % ' '.join(row_classes)) # If list_display_links not defined, add the link tag to the first field if link_in_col(first, field_name, cl): table_tag = 'th' if first else 'td' first = False # Display link to the result's change_view if the url exists, else # display just the result's representation. try: url = cl.url_for_result(result) except NoReverseMatch: link_or_text = result_repr else: url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url) # Convert the pk to something that can be used in Javascript. # Problem cases are non-ASCII strings. if cl.to_field: attr = str(cl.to_field) else: attr = pk value = result.serializable_value(attr) link_or_text = format_html( '<a href="{}"{}>{}</a>', url, format_html( ' data-popup-opener="{}"', value ) if cl.is_popup else '', result_repr) yield format_html('<{}{}>{}</{}>', table_tag, row_class, link_or_text, table_tag) else: # By default the fields come from ModelAdmin.list_editable, but if we pull # the fields out of the form instead of list_editable custom admins # can provide fields on a per request basis if (form and field_name in form.fields and not ( field_name == cl.model._meta.pk.name and form[cl.model._meta.pk.name].is_hidden)): bf = form[field_name] result_repr = mark_safe(str(bf.errors) + str(bf)) yield format_html('<td{}>{}</td>', row_class, result_repr) if form and not form[cl.model._meta.pk.name].is_hidden: yield format_html('<td>{}</td>', form[cl.model._meta.pk.name]) class ResultList(list): """ Wrapper class used to return items in a list_editable changelist, annotated with the form object for error reporting purposes. Needed to maintain backwards compatibility with existing admin templates. 
""" def __init__(self, form, *items): self.form = form super().__init__(*items) def results(cl): if cl.formset: for res, form in zip(cl.result_list, cl.formset.forms): yield ResultList(form, items_for_result(cl, res, form)) else: for res in cl.result_list: yield ResultList(None, items_for_result(cl, res, None)) def result_hidden_fields(cl): if cl.formset: for res, form in zip(cl.result_list, cl.formset.forms): if form[cl.model._meta.pk.name].is_hidden: yield mark_safe(form[cl.model._meta.pk.name]) def result_list(cl): """ Display the headers and data list together. """ headers = list(result_headers(cl)) num_sorted_fields = 0 for h in headers: if h['sortable'] and h['sorted']: num_sorted_fields += 1 return { 'cl': cl, 'result_hidden_fields': list(result_hidden_fields(cl)), 'result_headers': headers, 'num_sorted_fields': num_sorted_fields, 'results': list(results(cl)), } @register.tag(name='result_list') def result_list_tag(parser, token): return InclusionAdminNode( parser, token, func=result_list, template_name='change_list_results.html', takes_context=False, ) def date_hierarchy(cl): """ Display the date hierarchy for date drill-down functionality. """ if cl.date_hierarchy: field_name = cl.date_hierarchy year_field = '%s__year' % field_name month_field = '%s__month' % field_name day_field = '%s__day' % field_name field_generic = '%s__' % field_name year_lookup = cl.params.get(year_field) month_lookup = cl.params.get(month_field) day_lookup = cl.params.get(day_field) def link(filters): return cl.get_query_string(filters, [field_generic]) if not (year_lookup or month_lookup or day_lookup): # select appropriate start level date_range = cl.queryset.aggregate(first=models.Min(field_name), last=models.Max(field_name)) if date_range['first'] and date_range['last']: if date_range['first'].year == date_range['last'].year: year_lookup = date_range['first'].year if date_range['first'].month == date_range['last'].month: month_lookup = date_range['first'].month if year_lookup and month_lookup and day_lookup: day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup)) return { 'show': True, 'back': { 'link': link({year_field: year_lookup, month_field: month_lookup}), 'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT')) }, 'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}] } elif year_lookup and month_lookup: days = getattr(cl.queryset, 'dates')(field_name, 'day') return { 'show': True, 'back': { 'link': link({year_field: year_lookup}), 'title': str(year_lookup) }, 'choices': [{ 'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}), 'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT')) } for day in days] } elif year_lookup: months = getattr(cl.queryset, 'dates')(field_name, 'month') return { 'show': True, 'back': { 'link': link({}), 'title': _('All dates') }, 'choices': [{ 'link': link({year_field: year_lookup, month_field: month.month}), 'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT')) } for month in months] } else: years = getattr(cl.queryset, 'dates')(field_name, 'year') return { 'show': True, 'back': None, 'choices': [{ 'link': link({year_field: str(year.year)}), 'title': str(year.year), } for year in years] } @register.tag(name='date_hierarchy') def date_hierarchy_tag(parser, token): return InclusionAdminNode( parser, token, func=date_hierarchy, template_name='date_hierarchy.html', takes_context=False, ) def search_form(cl): """ Display a search form for searching the list. 
""" return { 'cl': cl, 'show_result_count': cl.result_count != cl.full_result_count, 'search_var': SEARCH_VAR } @register.tag(name='search_form') def search_form_tag(parser, token): return InclusionAdminNode(parser, token, func=search_form, template_name='search_form.html', takes_context=False) @register.simple_tag def admin_list_filter(cl, spec): tpl = get_template(spec.template) return tpl.render({ 'title': spec.title, 'choices': list(spec.choices(cl)), 'spec': spec, }) def admin_actions(context): """ Track the number of times the action field has been rendered on the page, so we know which value to use. """ context['action_index'] = context.get('action_index', -1) + 1 return context @register.tag(name='admin_actions') def admin_actions_tag(parser, token): return InclusionAdminNode(parser, token, func=admin_actions, template_name='actions.html') @register.tag(name='change_list_object_tools') def change_list_object_tools_tag(parser, token): """Display the row of change list object tools.""" return InclusionAdminNode( parser, token, func=lambda context: context, template_name='change_list_object_tools.html', )
georgemarshall/django
django/contrib/admin/templatetags/admin_list.py
Python
bsd-3-clause
18,018
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('email_marketing', '0003_auto_20160715_1145'), ] operations = [ migrations.AddField( model_name='emailmarketingconfiguration', name='welcome_email_send_delay', field=models.IntegerField(default=600, help_text='Number of seconds to delay the sending of User Welcome email after user has been activated'), ), ]
ahmedaljazzar/edx-platform
lms/djangoapps/email_marketing/migrations/0004_emailmarketingconfiguration_welcome_email_send_delay.py
Python
agpl-3.0
552
#! /usr/bin/env python import requests import sys import urllib from requests.auth import HTTPBasicAuth if len(sys.argv) != 5: print "usage: verify-topo-devices onos-node cluster-id first-index last-index" sys.exit(1) node = sys.argv[1] cluster = sys.argv[2] first = int(sys.argv[3]) last = int(sys.argv[4]) found = 0 topoRequest = requests.get('http://' + node + ':8181/onos/v1/topology/clusters/' + cluster + "/devices", auth=HTTPBasicAuth('onos', 'rocks')) if topoRequest.status_code != 200: print topoRequest.text sys.exit(1) topoJson = topoRequest.json() for deviceIndex in range(first, last+1): lookingFor = "of:" + format(deviceIndex, '016x') print lookingFor for arrayIndex in range(0, len(topoJson["devices"])): device = topoJson["devices"][arrayIndex] if device == lookingFor: found = found + 1 print "Match found for " + device break if found == last - first: sys.exit(0) print "Found " + str(found) + " matches, need " + str(last - first) sys.exit(2)
planoAccess/clonedONOS
tools/test/scenarios/bin/verify-topo-devices.py
Python
apache-2.0
1,143
from __future__ import division, absolute_import, print_function import sys from itertools import product import numpy as np from numpy.core import zeros, float64 from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \ assert_raises, assert_array_equal, assert_allclose, assert_equal from numpy.core.multiarray import inner as inner_ DECPREC = 14 class TestInner(TestCase): def test_vecself(self): """Ticket 844.""" # Inner product of a vector with itself segfaults or give meaningless # result a = zeros(shape = (1, 80), dtype = float64) p = inner_(a, a) assert_almost_equal(p, 0, decimal = DECPREC) try: import numpy.core._dotblas as _dotblas except ImportError: _dotblas = None @dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") def test_blasdot_used(): from numpy.core import dot, vdot, inner, alterdot, restoredot assert_(dot is _dotblas.dot) assert_(vdot is _dotblas.vdot) assert_(inner is _dotblas.inner) assert_(alterdot is _dotblas.alterdot) assert_(restoredot is _dotblas.restoredot) def test_dot_2args(): from numpy.core import dot a = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([[1, 0], [1, 1]], dtype=float) c = np.array([[3, 2], [7, 4]], dtype=float) d = dot(a, b) assert_allclose(c, d) def test_dot_3args(): np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) for i in range(12): np.dot(f, v, r) assert_equal(sys.getrefcount(r), 2) r2 = np.dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is np.dot(f, v, out=r)) v = v[:, 0].copy() # v.shape == (16,) r = r[:, 0].copy() # r.shape == (1024,) r2 = np.dot(f, v) assert_(r is np.dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(): np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 31)) assert_raises(ValueError, np.dot, f, v, r) r = np.empty((1024,)) assert_raises(ValueError, np.dot, f, v, r) r = np.empty((32,)) assert_raises(ValueError, np.dot, f, v, r) r = np.empty((32, 1024)) assert_raises(ValueError, np.dot, f, v, r) assert_raises(ValueError, np.dot, f, v, r.T) r = np.empty((1024, 64)) assert_raises(ValueError, np.dot, f, v, r[:, ::2]) assert_raises(ValueError, np.dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, np.dot, f, v, r) r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, np.dot, f, v, r) def test_dot_array_order(): """ Test numpy dot with different order C, F Comparing results with multiarray dot. Double and single precisions array are compared using relative precision of 7 and 5 decimals respectively. 
Use 30 decimal when comparing exact operations like: (a.b)' = b'.a' """ _dot = np.core.multiarray.dot a_dim, b_dim, c_dim = 10, 4, 7 orders = ["C", "F"] dtypes_prec = {np.float64: 7, np.float32: 5} np.random.seed(7) for arr_type, prec in dtypes_prec.items(): for a_order in orders: a = np.asarray(np.random.randn(a_dim, a_dim), dtype=arr_type, order=a_order) assert_array_equal(np.dot(a, a), a.dot(a)) # (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec) # # Check with making explicit copy # a_T = a.T.copy(order=a_order) assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec) assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec) assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec) # # Compare with multiarray dot # assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec) assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec) assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec) assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec) for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T): assert res.flags.c_contiguous for b_order in orders: b = np.asarray(np.random.randn(a_dim, b_dim), dtype=arr_type, order=b_order) b_T = b.T.copy(order=b_order) assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec) assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec) # (b'.a)' = a'.b assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec) assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec) assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec) for c_order in orders: c = np.asarray(np.random.randn(b_dim, c_dim), dtype=arr_type, order=c_order) c_T = c.T.copy(order=c_order) assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec) assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec) assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec) assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec) @dec.skipif(True) # ufunc override disabled for 1.9 def test_dot_override(): class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return "A" class B(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return NotImplemented a = A() b = B() c = np.array([[1]]) assert_equal(np.dot(a, b), "A") assert_equal(c.dot(a), "A") assert_raises(TypeError, np.dot, b, c) assert_raises(TypeError, c.dot, b) def test_npdot_segfault(): if sys.platform != 'darwin': return # Test for float32 np.dot segfault # https://github.com/numpy/numpy/issues/4007 def aligned_array(shape, align, dtype, order='C'): # Make array shape `shape` with aligned at `align` bytes d = dtype() # Make array of correct size with `align` extra bytes N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] # Find offset into array giving desired alignment for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): # Copy `arr` into an aligned array with same shape aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) m = aligned_array(100, 15, np.float32) s = aligned_array((100, 100), 15, np.float32) # This always segfaults when the sgemv alignment bug is present np.dot(s, m) # test the sanity of np.dot after applying patch for align, m, n, a_order 
in product( (15, 32), (10000,), (200, 89), ('C', 'F')): # Calculation in double precision A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32) assert_dot_close(A_f, X_f, desired) # Strided A rows A_d_2 = A_d[::2] desired = np.dot(A_d_2, X_d) A_f_2 = A_f[::2] assert_dot_close(A_f_2, X_f, desired) # Strided A columns, strided X vector A_d_22 = A_d_2[:, ::2] X_d_2 = X_d[::2] desired = np.dot(A_d_22, X_d_2) A_f_22 = A_f_2[:, ::2] X_f_2 = X_f[::2] assert_dot_close(A_f_22, X_f_2, desired) # Check the strides are as expected if a_order == 'F': assert_equal(A_f_22.strides, (8, 8 * m)) else: assert_equal(A_f_22.strides, (8 * n, 8)) assert_equal(X_f_2.strides, (8,)) # Strides in A rows + cols only X_f_2c = as_aligned(X_f_2, align, np.float32) assert_dot_close(A_f_22, X_f_2c, desired) # Strides just in A cols A_d_12 = A_d[:, ::2] desired = np.dot(A_d_12, X_d_2) A_f_12 = A_f[:, ::2] assert_dot_close(A_f_12, X_f_2c, desired) # Strides in A cols and X assert_dot_close(A_f_12, X_f_2, desired)
Reagankm/KnockKnock
venv/lib/python3.4/site-packages/numpy/core/tests/test_blasdot.py
Python
gpl-2.0
8,880
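The tests above check that ndarray.dot, the multiarray dot and the transpose identity (a.b)' = b'.a' agree across dtypes and memory orders. A minimal standalone sketch of the same identity check, assuming only NumPy; the shapes, seed and tolerances here are illustrative rather than the ones used in the test file.

import numpy as np

def check_dot_transpose_identity(n=10, dtype=np.float32):
    """Verify (a.b)' == b'.a' for C- and F-ordered random matrices."""
    rng = np.random.RandomState(7)
    for order_a in ("C", "F"):
        for order_b in ("C", "F"):
            a = np.asarray(rng.randn(n, n), dtype=dtype, order=order_a)
            b = np.asarray(rng.randn(n, n), dtype=dtype, order=order_b)
            lhs = np.dot(a, b).T
            rhs = np.dot(b.T, a.T)
            # Different memory layouts take different BLAS paths, so round-off
            # differs slightly; compare with a tolerance, not exact equality.
            assert np.allclose(lhs, rhs, rtol=1e-4, atol=1e-5), (order_a, order_b)

check_dot_transpose_identity()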
# Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import logging from google.appengine.ext import db as models import appengine_django.models as aed_models from oauth import oauth from django.conf import settings from django.db import models as django_models from common import profile from common import properties from common import util import settings PRIVACY_PRIVATE = 1 PRIVACY_CONTACTS = 2 PRIVACY_PUBLIC = 3 ACTOR_ALLOWED_EXTRA = ('contact_count', 'follower_count', 'icon', 'description', 'member_count', 'admin_count', 'given_name', 'family_name' 'homepage' ) ACTOR_LIMITED_EXTRA = ('icon', 'description', 'given_name', 'family_name' ) # Internal Utility Functions def _get_actor_type_from_nick(nick): if nick[0] == "#": return "channel" return "user" def _get_actor_urlnick_from_nick(nick): parts = nick.split('@') nick = parts[0] if nick[0] == "#": nick = nick[1:] return nick def _to_api(v): if hasattr(v, 'to_api'): v = v.to_api() elif isinstance(v, type([])): v = [_to_api(x) for x in v] elif isinstance(v, type({})): v = dict([(key, _to_api(value)) for (key, value) in v.iteritems()]) elif isinstance(v, datetime.datetime): v = str(v) return v # Base Models, Internal Only class ApiMixinModel(aed_models.BaseModel): def to_api(self): o = {} for prop in self.properties().keys(): value = getattr(self, prop) o[prop] = _to_api(value) return o class CachingModel(ApiMixinModel): """A simple caching layer for model objects: caches any item read with get_by_key_name and removes from the cache on put() and delete() You must call reset_cache() in the beginning of any HTTP request or test. The design idea is that this should give a consistent view of the data within the processing a single request. """ # TODO(mikie): appengine has non-Model put() and delete() that act on a bunch # of items at once. To be correct this should hook those as well. # TODO(mikie): should hook to the django sync_db signal so that the cache is # reset when database is (to support fixtures in tests correctly). 
# TODO(mikie): should cache items read through methods other than # get_by_key_name() _cache = { } _cache_enabled = False _get_count = 0 def __init__(self, parent=None, key_name=None, _app=None, **kw): if not key_name and 'key' not in kw: key_name = self.key_from(**kw) super(CachingModel, self).__init__( parent, key_name=key_name, _app=_app, **kw) if not key_name: key_name = self.key_from(**kw) self._cache_keyname__ = (key_name, parent) @classmethod def key_from(cls, **kw): if hasattr(cls, 'key_template'): try: return cls.key_template % kw except KeyError: logging.warn('Automatic key_name generation failed: %s <- %s', cls.key_template, kw) return None def _remove_from_cache(self): clsname = self.__class__.__name__ if CachingModel._cache_enabled: if CachingModel._cache.has_key(clsname): if CachingModel._cache[clsname].has_key(self._cache_keyname__): CachingModel._cache[clsname].pop(self._cache_keyname__) @profile.log_write def put(self): self._remove_from_cache() ret = super(CachingModel, self).put() self._cache_keyname__ = (self.key().name(), self.parent_key()) self._remove_from_cache() return ret def save(self): return self.put() @profile.log_write def delete(self): self._remove_from_cache() return super(CachingModel, self).delete() @classmethod @profile.log_call('threadlocal_cached_read') def get_by_key_name(cls, key_names, parent=None): if not key_names: return # Only caches when called with a single key if CachingModel._cache_enabled and ( isinstance(key_names, str) or isinstance(key_names, unicode)): clsname = cls.__name__ if not CachingModel._cache.has_key(clsname): CachingModel._cache[clsname] = { } elif CachingModel._cache[clsname].has_key((key_names, parent)): profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_hit') return CachingModel._cache[clsname][(key_names, parent)] profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_miss') ret = super(CachingModel, cls).get_by_key_name(key_names, parent) CachingModel._get_count += 1 CachingModel._cache[clsname][(key_names, parent)] = ret if ret: ret._cache_keyname__ = (key_names, parent) return ret else: CachingModel._get_count += len(key_names) return super(CachingModel, cls).get_by_key_name(key_names, parent) @classmethod def db_get_count(cls): return CachingModel._get_count @classmethod def reset_cache(cls): CachingModel._cache = { } @classmethod def enable_cache(cls, enabled = True): CachingModel._cache_enabled = enabled if not enabled: CachingModel._cache = { } @classmethod def reset_get_count(cls): CachingModel._get_count = 0 @classmethod @profile.log_read def gql(cls, *args, **kw): return super(CachingModel, cls).gql(*args, **kw) @classmethod @profile.log_read def Query(cls): # TODO(termie): I don't like that this module is called "models" here, # I'd prefer to be accessing it by "db" return models.Query(cls) class DeletedMarkerModel(CachingModel): deleted_at = properties.DateTimeProperty() def mark_as_deleted(self): self.deleted_at = datetime.datetime.utcnow() self.put() def is_deleted(self): return self.deleted_at # Public Models class AbuseReport(CachingModel): entry = models.StringProperty() # ref - entry actor = models.StringProperty() # ref - actor for entry reports = models.StringListProperty() # the actors who have reported this count = models.IntegerProperty() # the count of the number of reports so far key_template = '%(entry)s' class Activation(CachingModel): actor = models.StringProperty() content = models.StringProperty() code = models.StringProperty() type = models.StringProperty() key_template = 
'activation/%(actor)s/%(type)s/%(content)s' def actor_url(nick, actor_type, path='', request=None, mobile=False): """ returns a url, with optional path appended NOTE: if appending a path, it should start with '/' """ prefix = "" mobile = mobile or (request and request.mobile) if mobile: prefix = "m." if (settings.WILDCARD_USER_SUBDOMAINS_ENABLED and actor_type == 'user' and not mobile): return 'http://%s.%s%s' % (nick, settings.HOSTED_DOMAIN, path) elif mobile and settings.SUBDOMAINS_ENABLED: return 'http://%s%s/%s/%s%s' % (prefix, settings.HOSTED_DOMAIN, actor_type, nick, path) else: return 'http://%s/%s/%s%s' % (settings.DOMAIN, actor_type, nick, path) class Actor(DeletedMarkerModel): """ extra: channel_count - int; number of channels contact_count - int; number of contacts follower_count - int; number of followers icon - string; avatar path bg_image - string; image for background (takes precedence over bg_color) bg_color - string; color for background bg_repeat - whether to repeat bg_image description [channel] - string; Channel description external_url [channel] - string; External url related to channel member_count [channel] - int; number of members admin_count [channel] - int; number of admins email_notify [user] - boolean; does the user want email notifications? given_name [user] - string; First name family_name [user] - string; Last Name comments_hide [user] - boolean; Whether comments should be hidden on overview """ nick = models.StringProperty() # the appengine datastore is case-sensitive whereas human brains are not, # Paul is not different from paul to regular people so we need a way to # prevent duplicate names from cropping up, this adds an additional indexed # property to support that normalized_nick = models.StringProperty() password = models.StringProperty() privacy = models.IntegerProperty() type = models.StringProperty() extra = properties.DictProperty() # avatar_updated_at is used by DJabberd to get a list of changed avatar. We # set the default to a date before the launch so that initial avatars have an # updated_at that is less than any real changes. avatar_updated_at = properties.DateTimeProperty( default=datetime.datetime(2009, 01, 01)) key_template = 'actor/%(nick)s' def url(self, path="", request=None, mobile=False): """ returns a url, with optional path appended NOTE: if appending a path, it should start with '/' """ return actor_url(_get_actor_urlnick_from_nick(self.nick), self.type, path=path, request=request, mobile=mobile) def shortnick(self): return _get_actor_urlnick_from_nick(self.nick) def display_nick(self): return self.nick.split("@")[0] return _get_actor_urlnick_from_nick(self.nick) def to_api(self): rv = super(Actor, self).to_api() del rv['password'] del rv['normalized_nick'] extra = {} for k, v in rv['extra'].iteritems(): if k in ACTOR_ALLOWED_EXTRA: extra[k] = v rv['extra'] = extra return rv def to_api_limited(self): rv = self.to_api() extra = {} for k, v in rv['extra'].iteritems(): if k in ACTOR_LIMITED_EXTRA: extra[k] = v rv['extra'] = extra return rv def is_channel(self): return self.type == 'channel' def is_public(self): return self.privacy == PRIVACY_PUBLIC def is_restricted(self): return self.privacy == PRIVACY_CONTACTS def __repr__(self): # Get all properties, but not directly as property objects, because # constructor requires values to be passed in. 
d = dict([(k, self.__getattribute__(k)) for k in self.properties().keys()]) return "%s(**%s)" % (self.__class__.__name__, repr(d)) class Image(CachingModel): actor = models.StringProperty() # whose image is this? content = models.BlobProperty() # the image itself size = models.StringProperty() # see api.avatar_upload # TODO(termie): key_template plans don't really work very well here # because we haven't been storing the path :/ class InboxEntry(CachingModel): """This is the inbox index for an entry. the index allows us to quickly pull the overview for a user. There may be items in the results that are later filtered out - deleted items or items whose privacy has changed. """ inbox = models.StringListProperty() # ref - who this is the inbox for stream = models.StringProperty() # ref - the stream this belongs to stream_type = models.StringProperty() # separate because we may filter on it entry = models.StringProperty() # ref - the entry if this is a comment created_at = properties.DateTimeProperty() uuid = models.StringProperty() shard = models.StringProperty() # an identifier for this portion of # inboxes key_template = 'inboxentry/%(stream)s/%(uuid)s/%(shard)s' def stream_entry_keyname(self): """Returns the key name of the corresponding StreamEntry""" return "%s/%s" % (self.stream, self.uuid) class Invite(CachingModel): code = models.StringProperty() # the code for the invite email = models.StringProperty() # the email this invite went to to_actor = models.StringProperty() # ref - the actor this invite was sent to from_actor = models.StringProperty() # ref - who sent this invite for_actor = models.StringProperty() # ref - invited to what, probs a channel status = models.StringProperty(default="active") # enum - active, blocked key_template = 'invite/%(code)s' class KeyValue(CachingModel): actor = models.StringProperty() keyname = models.StringProperty() value = models.TextProperty() key_template = 'keyvalue/%(actor)s/%(keyname)s' class OAuthAccessToken(CachingModel): key_ = models.StringProperty() # the token key secret = models.StringProperty() # the token secret consumer = models.StringProperty() # the consumer this key is assigned to actor = models.StringProperty() # the actor this key authenticates for created_at = properties.DateTimeProperty(auto_now_add=True) # when this was created perms = models.StringProperty() # read / write / delete key_template = 'oauth/accesstoken/%(key_)s' def to_string(self): token = oauth.OAuthToken(self.key_, self.secret) return token.to_string() class OAuthConsumer(CachingModel): key_ = models.StringProperty() # the consumer key secret = models.StringProperty() # the consumer secret actor = models.StringProperty() # the actor who owns this status = models.StringProperty() # active / pending / inactive type = models.StringProperty() # web / desktop / mobile commercial = models.IntegerProperty() # is this a commercial key? 
app_name = models.StringProperty() # the name of the app this is for, # to be displayed to the user created_at = properties.DateTimeProperty(auto_now_add=True) key_template = 'oauth/consumer/%(key_)s' def url(self): return '/api/keys/%s' % self.key_ class OAuthNonce(CachingModel): nonce = models.StringProperty() # the nonce consumer = models.StringProperty() # the consumer this nonce is for token = models.StringProperty() # the token this nonce is for created_at = properties.DateTimeProperty(auto_now_add=True) # when this was created class OAuthRequestToken(CachingModel): key_ = models.StringProperty() # the token key secret = models.StringProperty() # the token secret consumer = models.StringProperty() # the consumer this key is assigned to actor = models.StringProperty() # the actor this key authenticates for authorized = models.IntegerProperty() # has the actor authorized this token? created_at = properties.DateTimeProperty(auto_now_add=True) # when this was created perms = models.StringProperty() # read / write / delete key_template = 'oauth/requesttoken/%(key_)s' def to_string(self): token = oauth.OAuthToken(self.key_, self.secret) return token.to_string() class Presence(CachingModel): """This represents all the presence data for an actor at a moment in time. extra: status - string; message (like an "away message") location - string; TODO(tyler): Consider gps / cell / structured data availability - string; TODO(tyler): Define structure """ actor = models.StringProperty() # The actor whose presence this is updated_at = properties.DateTimeProperty(auto_now_add=True) # The moment we got the update uuid = models.StringProperty() extra = properties.DictProperty() # All the rich presence # TODO(termie): can't do key_template here yet because we include # current and history keys :/ class Task(CachingModel): actor = models.StringProperty() # ref - the owner of this queue item action = models.StringProperty() # api call we are iterating through action_id = models.StringProperty() # unique identifier for this queue item args = models.StringListProperty() # *args kw = properties.DictProperty() # *kw expire = properties.DateTimeProperty() # when our lock will expire progress = models.StringProperty() # a string representing the offset to # which we've progressed so far created_at = properties.DateTimeProperty(auto_now_add=True) key_template = 'task/%(actor)s/%(action)s/%(action_id)s' class Relation(CachingModel): owner = models.StringProperty() # ref - actor nick relation = models.StringProperty() # what type of relationship this is target = models.StringProperty() # ref - actor nick key_template = 'relation/%(relation)s/%(owner)s/%(target)s' class Stream(DeletedMarkerModel): """ extra: see api.stream_create() """ owner = models.StringProperty() # ref title = models.StringProperty() type = models.StringProperty() slug = models.StringProperty() read = models.IntegerProperty() # TODO: document this write = models.IntegerProperty() extra = properties.DictProperty() key_template = 'stream/%(owner)s/%(slug)s' def is_public(self): return self.read == PRIVACY_PUBLIC def is_restricted(self): return self.read == PRIVACY_CONTACTS def keyname(self): """Returns the key name""" return self.key().name() class StreamEntry(DeletedMarkerModel): """ extra : title - location - icon - content - entry_stream - entry_stream_type - entry_title - entry_uuid - comment_count - """ stream = models.StringProperty() # ref - the stream this belongs to owner = models.StringProperty() # ref - the actor who owns the stream 
actor = models.StringProperty() # ref - the actor who wrote this entry = models.StringProperty() # ref - the parent of this, # should it be a comment uuid = models.StringProperty() created_at = properties.DateTimeProperty(auto_now_add=True) extra = properties.DictProperty() key_template = '%(stream)s/%(uuid)s' def url(self, with_anchor=True, request=None, mobile=False): if self.entry: # TODO bad? slug = self.entry.split("/")[-1] anchor = "#c-%s" % self.uuid else: # TODO(termie): add slug property slug = self.uuid anchor = "" path = "/%s/%s" % ('presence', slug) if with_anchor: path = "%s%s" % (path, anchor) return actor_url(_get_actor_urlnick_from_nick(self.owner), _get_actor_type_from_nick(self.owner), path=path, request=request, mobile=mobile) def keyname(self): """Returns the key name""" return self.key().name() def title(self): """ build a title for this entry, for a presence entry it will just be the title, but for a comment it will look like: Comment from [commenter nick] on [entry title] by [nick] Comment from [commenter nick] on [entry title] by [nick] to #[channel name] """ if not self.is_comment(): return self.extra.get('title') template = "Comment from %(actor)s on %(entry_title)s by %(entry_actor)s" actor = _get_actor_urlnick_from_nick(self.actor) entry_title = self.extra.get('entry_title') entry_actor = _get_actor_urlnick_from_nick(self.extra.get('entry_actor')) entry_owner_nick = util.get_user_from_topic(self.entry) entry_type = _get_actor_type_from_nick(entry_owner_nick) v = {'actor': actor, 'entry_title': entry_title, 'entry_actor': entry_actor, } if entry_type == 'channel': template += ' to #%(channel)s' channel = _get_actor_urlnick_from_nick(entry_owner_nick) v['channel'] = channel return template % v def is_comment(self): return (self.entry != None) def is_channel(self): return self.owner.startswith('#') def entry_actor(self): if self.entry: return util.get_user_from_topic(self.entry) return None class Subscription(CachingModel): """this represents a topic, usually a stream, that a subscriber (usually an inbox) would like to receive updates to """ topic = models.StringProperty() # ref - the stream being subscribed to subscriber = models.StringProperty() # ref - the subscriber (actor) target = models.StringProperty() # where to dump this state = models.StringProperty() # The status of remote subs, see XEP-0060 # sect 4.2. The 'pending' state is ignored if # the target of the subscription is used. # The design is for performance: on public # entries # the state is ignored and listing the # subscriptions is a single query; for # contacts-only entries the state is used but # it is also kept up-to-date regarding buddy # relationships, so a single query for # state='subscribed' can again be used. extra = properties.DictProperty() # holds a bunch of stuff created_at = properties.DateTimeProperty(auto_now_add=True) # for ordering someday key_template = '%(topic)s/%(target)s' def is_subscribed(self): # LEGACY COMPAT: the 'or' here is for legacy compat return (self.state == 'subscribed' or self.state == None) #class ActorMobile(models.Model): # nick = models.TextField() # mobile = models.TextField() # country_code = models.TextField() # confirmed = models.BooleanField() #class ActorEmail(models.Model): # nick = models.TextField() # email = models.EmailField()
chheplo/jaikuengine
common/models.py
Python
apache-2.0
22,503
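CachingModel above keeps a per-request, in-process cache keyed by (key_name, parent), fills it in get_by_key_name() and drops entries on put()/delete(). A minimal sketch of the same idea outside App Engine, assuming any backend that exposes get/put; the class and method names are illustrative.

class RequestCache(object):
    """Per-request read cache: fill on reads, invalidate on writes, reset between requests."""

    def __init__(self, backend):
        self._backend = backend      # anything with get(key) and put(key, value)
        self._cache = {}

    def reset(self):
        # Call at the start of every request so stale data never leaks across requests.
        self._cache = {}

    def get(self, key):
        if key in self._cache:
            return self._cache[key]
        value = self._backend.get(key)
        self._cache[key] = value
        return value

    def put(self, key, value):
        # Invalidate before writing, mirroring CachingModel._remove_from_cache().
        self._cache.pop(key, None)
        return self._backend.put(key, value)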
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

def execute():
	import webnotes

	entries = webnotes.conn.sql("""select voucher_type, voucher_no
		from `tabGL Entry` group by voucher_type, voucher_no""", as_dict=1)

	for entry in entries:
		try:
			cancelled_voucher = webnotes.conn.sql("""select name from `tab%s`
				where name = %s and docstatus=2""" % (entry['voucher_type'], "%s"),
				entry['voucher_no'])

			if cancelled_voucher:
				webnotes.conn.sql("""delete from `tabGL Entry` where voucher_type = %s
					and voucher_no = %s""", (entry['voucher_type'], entry['voucher_no']))
		except:
			pass
saurabh6790/test-med-app
patches/october_2013/p05_delete_gl_entries_for_cancelled_vouchers.py
Python
agpl-3.0
683
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import webnotes

def execute():
	try:
		webnotes.conn.sql("""delete from `tabSearch Criteria` where ifnull(standard, 'No') = 'Yes'""")
	except Exception, e:
		pass
saurabh6790/test-med-app
patches/june_2013/p05_remove_search_criteria_reports.py
Python
agpl-3.0
334
"""Mycroft AI notification platform.""" import logging from mycroftapi import MycroftAPI from homeassistant.components.notify import BaseNotificationService _LOGGER = logging.getLogger(__name__) def get_service(hass, config, discovery_info=None): """Get the Mycroft notification service.""" return MycroftNotificationService(hass.data["mycroft"]) class MycroftNotificationService(BaseNotificationService): """The Mycroft Notification Service.""" def __init__(self, mycroft_ip): """Initialize the service.""" self.mycroft_ip = mycroft_ip def send_message(self, message="", **kwargs): """Send a message mycroft to speak on instance.""" text = message mycroft = MycroftAPI(self.mycroft_ip) if mycroft is not None: mycroft.speak_text(text) else: _LOGGER.log("Could not reach this instance of mycroft")
nkgilley/home-assistant
homeassistant/components/mycroft/notify.py
Python
apache-2.0
908
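The notify platform above simply hands whatever Home Assistant passes to send_message() over to MycroftAPI.speak_text(). A rough usage sketch of the underlying call, assuming a reachable Mycroft device; the IP address and message are placeholders.

from mycroftapi import MycroftAPI

MYCROFT_IP = "192.168.1.50"   # hypothetical address of a Mycroft device

mycroft = MycroftAPI(MYCROFT_IP)
if mycroft is not None:
    # speak_text() asks the Mycroft instance to say the text out loud.
    mycroft.speak_text("Front door opened")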
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from compiled_file_system import CompiledFileSystem from file_system import FileNotFoundError class ChainedCompiledFileSystem(object): ''' A CompiledFileSystem implementation that fetches data from a chain of CompiledFileSystems that have different file systems and separate cache namespaces. The rules for the compiled file system chain are: - Versions are fetched from the first compiled file system's underlying file system. - Each compiled file system is read in the reverse order (the last one is read first). If the version matches, return the data. Otherwise, read from the previous compiled file system until the first one is read. It is used to chain compiled file systems whose underlying file systems are slightly different. This makes it possible to reuse cached compiled data in one of them without recompiling everything that is shared by them. ''' class Factory(CompiledFileSystem.Factory): def __init__(self, factory_and_fs_chain): self._factory_and_fs_chain = factory_and_fs_chain def Create(self, populate_function, cls, category=None): return ChainedCompiledFileSystem( [(factory.Create(populate_function, cls, category), fs) for factory, fs in self._factory_and_fs_chain]) def __init__(self, compiled_fs_chain): assert len(compiled_fs_chain) > 0 self._compiled_fs_chain = compiled_fs_chain def GetFromFile(self, path, binary=False): # It's possible that a new file is added in the first compiled file system # and it doesn't exist in other compiled file systems. try: first_compiled_fs, first_file_system = self._compiled_fs_chain[0] # The first file system contains both files of a newer version and files # shared with other compiled file systems. We are going to try each # compiled file system in the reverse order and return the data when # version matches. Data cached in other compiled file system will be # reused whenever possible so that we don't need to recompile things that # are not changed across these file systems. version = first_file_system.Stat(path).version for compiled_fs, _ in reversed(self._compiled_fs_chain): if compiled_fs.StatFile(path) == version: return compiled_fs.GetFromFile(path, binary) except FileNotFoundError: pass # Try first operation again to generate the correct stack trace return first_compiled_fs.GetFromFile(path, binary) def GetFromFileListing(self, path): if not path.endswith('/'): path += '/' try: first_compiled_fs, first_file_system = self._compiled_fs_chain[0] version = first_file_system.Stat(path).version for compiled_fs, _ in reversed(self._compiled_fs_chain): if compiled_fs.StatFileListing(path) == version: return compiled_fs.GetFromFileListing(path) except FileNotFoundError: pass # Try first operation again to generate the correct stack trace return first_compiled_fs.GetFromFileListing(path)
windyuuy/opera
chromium/src/chrome/common/extensions/docs/server2/chained_compiled_file_system.py
Python
bsd-3-clause
3,233
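ChainedCompiledFileSystem above takes the authoritative version from the first file system and then probes the caches in reverse order, returning the first one whose stat matches. A simplified standalone sketch of that lookup order, assuming hypothetical cache and file-system objects with stat() and get(); the names are illustrative, not the Chromium API.

def chained_get(path, chain):
    """chain: list of (cache, file_system) pairs, newest first."""
    first_cache, first_fs = chain[0]
    # Versions always come from the first (newest) underlying file system.
    version = first_fs.stat(path)
    # Probe caches last-to-first so older, shared caches can serve unchanged files.
    for cache, _fs in reversed(chain):
        if cache.stat(path) == version:
            return cache.get(path)
    # Nothing matched: recompile via the first cache.
    return first_cache.get(path)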
# TODO: use a unit-testing library for asserts
# invoke with:
#   ./pox.py --script=tests.topology.topology topology
#
# Maybe there is a less awkward way to invoke tests...

from pox.core import core
from pox.lib.revent import *

topology = core.components['topology']

def autobinds_correctly():
  topology.listenTo(core)
  return True

if not autobinds_correctly():
  raise AssertionError("Did not autobind correctly")
sstjohn/pox
tests/topology/topology.py
Python
gpl-3.0
423
#!/usr/bin/env python # ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''Demonstrates how to handle a platform-specific event not defined in pyglet by subclassing Window. This is not for the faint-hearted! A message will be printed to stdout when the following events are caught: - On Mac OS X, the window drag region is clicked. - On Windows, the display resolution is changed. - On Linux, the window properties are changed. ''' import pyglet # Check for Carbon (OS X) try: from pyglet.window.carbon import * _have_carbon = True except ImportError: _have_carbon = False # Check for Win32 try: from pyglet.window.win32 import * from pyglet.window.win32.constants import * _have_win32 = True except ImportError: _have_win32 = False # Check for Xlib (Linux) try: from pyglet.window.xlib import * _have_xlib = True except ImportError: _have_xlib = False # Subclass Window class MyWindow(pyglet.window.Window): if _have_carbon: @CarbonEventHandler(kEventClassWindow, kEventWindowClickDragRgn) def _on_window_click_drag_rgn(self, next_handler, event, data): print 'Clicked drag rgn.' carbon.CallNextEventHandler(next_handler, event) return noErr if _have_win32: @Win32EventHandler(WM_DISPLAYCHANGE) def _on_window_display_change(self, msg, lParam, wParam): print 'Display resolution changed.' return 0 if _have_xlib: @XlibEventHandler(xlib.PropertyNotify) def _on_window_property_notify(self, event): print 'Property notify.' if __name__ == '__main__': window = MyWindow() pyglet.app.run()
sangh/LaserShow
pyglet-hg/examples/window_platform_event.py
Python
bsd-3-clause
3,351
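The pyglet example above gates each platform-specific handler on whether the corresponding window module imports. The same optional-dependency pattern in isolation, with a conditionally defined method; fcntl is just a convenient module that exists on POSIX platforms and not on Windows.

try:
    import fcntl          # POSIX-only module; absent on Windows
    _have_fcntl = True
except ImportError:
    _have_fcntl = False

class Worker(object):
    # The method only exists where the platform supports it, mirroring the
    # way MyWindow only defines handlers for the available windowing system.
    if _have_fcntl:
        def lock(self, fileobj):
            fcntl.flock(fileobj, fcntl.LOCK_EX)

# Callers can feature-test with hasattr(Worker, 'lock') before using it.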
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: edgeos_facts version_added: "2.5" author: - Nathaniel Case (@Qalthos) - Sam Doran (@samdoran) short_description: Collect facts from remote devices running EdgeOS description: - Collects a base set of device facts from a remote device that is running EdgeOS. This module prepends all of the base network fact keys with U(ansible_net_<fact>). The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts. notes: - Tested against EdgeOS 1.9.7 options: gather_subset: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, default, config, and neighbors. Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. required: false default: "!config" """ EXAMPLES = """ - name: collect all facts from the device edgeos_facts: gather_subset: all - name: collect only the config and default facts edgeos_facts: gather_subset: config - name: collect everything exception the config edgeos_facts: gather_subset: "!config" """ RETURN = """ ansible_net_config: description: The running-config from the device returned: when config is configured type: str ansible_net_commits: description: The set of available configuration revisions returned: when present type: list ansible_net_hostname: description: The configured system hostname returned: always type: str ansible_net_model: description: The device model string returned: always type: str ansible_net_serialnum: description: The serial number of the device returned: always type: str ansible_net_version: description: The version of the software running returned: always type: str ansible_net_neighbors: description: The set of LLDP neighbors returned: when interface is configured type: list ansible_net_gather_subset: description: The list of subsets gathered by the module returned: always type: list """ import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems from ansible.module_utils.network.edgeos.edgeos import run_commands class FactsBase(object): COMMANDS = frozenset() def __init__(self, module): self.module = module self.facts = dict() self.responses = None def populate(self): self.responses = run_commands(self.module, list(self.COMMANDS)) class Default(FactsBase): COMMANDS = [ 'show version', 'show host name', ] def populate(self): super(Default, self).populate() data = self.responses[0] self.facts['version'] = self.parse_version(data) self.facts['serialnum'] = self.parse_serialnum(data) self.facts['model'] = self.parse_model(data) self.facts['hostname'] = self.responses[1] def parse_version(self, data): match = re.search(r'Version:\s*v(\S+)', data) if match: return match.group(1) def parse_model(self, data): match = re.search(r'HW model:\s*([A-Za-z0-9- ]+)', data) if match: return match.group(1) def parse_serialnum(self, data): match = re.search(r'HW S/N:\s+(\S+)', data) if match: return match.group(1) class Config(FactsBase): COMMANDS = [ 'show configuration commands', 
'show system commit', ] def populate(self): super(Config, self).populate() self.facts['config'] = self.responses commits = self.responses[1] entries = list() entry = None for line in commits.split('\n'): match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line) if match: if entry: entries.append(entry) entry = dict(revision=match.group(1), datetime=match.group(2), by=str(match.group(3)).strip(), via=str(match.group(4)).strip(), comment=None) elif entry: entry['comment'] = line.strip() self.facts['commits'] = entries class Neighbors(FactsBase): COMMANDS = [ 'show lldp neighbors', 'show lldp neighbors detail', ] def populate(self): super(Neighbors, self).populate() all_neighbors = self.responses[0] if 'LLDP not configured' not in all_neighbors: neighbors = self.parse( self.responses[1] ) self.facts['neighbors'] = self.parse_neighbors(neighbors) def parse(self, data): parsed = list() values = None for line in data.split('\n'): if not line: continue elif line[0] == ' ': values += '\n%s' % line elif line.startswith('Interface'): if values: parsed.append(values) values = line if values: parsed.append(values) return parsed def parse_neighbors(self, data): facts = dict() for item in data: interface = self.parse_interface(item) host = self.parse_host(item) port = self.parse_port(item) if interface not in facts: facts[interface] = list() facts[interface].append(dict(host=host, port=port)) return facts def parse_interface(self, data): match = re.search(r'^Interface:\s+(\S+),', data) return match.group(1) def parse_host(self, data): match = re.search(r'SysName:\s+(.+)$', data, re.M) if match: return match.group(1) def parse_port(self, data): match = re.search(r'PortDescr:\s+(.+)$', data, re.M) if match: return match.group(1) FACT_SUBSETS = dict( default=Default, neighbors=Neighbors, config=Config ) VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) def main(): spec = dict( gather_subset=dict(default=['!config'], type='list') ) module = AnsibleModule(argument_spec=spec, supports_check_mode=True) warnings = list() gather_subset = module.params['gather_subset'] runable_subsets = set() exclude_subsets = set() for subset in gather_subset: if subset == 'all': runable_subsets.update(VALID_SUBSETS) continue if subset.startswith('!'): subset = subset[1:] if subset == 'all': exclude_subsets.update(VALID_SUBSETS) continue exclude = True else: exclude = False if subset not in VALID_SUBSETS: module.fail_json(msg='Subset must be one of [%s], got %s' % (', '.join(VALID_SUBSETS), subset)) if exclude: exclude_subsets.add(subset) else: runable_subsets.add(subset) if not runable_subsets: runable_subsets.update(VALID_SUBSETS) runable_subsets.difference_update(exclude_subsets) runable_subsets.add('default') facts = dict() facts['gather_subset'] = list(runable_subsets) instances = list() for key in runable_subsets: instances.append(FACT_SUBSETS[key](module)) for inst in instances: inst.populate() facts.update(inst.facts) ansible_facts = dict() for key, value in iteritems(facts): key = 'ansible_net_%s' % key ansible_facts[key] = value module.exit_json(ansible_facts=ansible_facts, warnings=warnings) if __name__ == '__main__': main()
kvar/ansible
lib/ansible/modules/network/edgeos/edgeos_facts.py
Python
gpl-3.0
8,337
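main() above expands gather_subset values such as 'all' and '!config' into the final set of fact classes to run. A condensed sketch of just that resolution step, assuming the same semantics; the function name is made up and the subsets are the three from the module.

VALID_SUBSETS = frozenset(['default', 'config', 'neighbors'])

def resolve_subsets(gather_subset):
    """Expand 'all' and '!name' entries into the set of subsets to collect."""
    runable, exclude = set(), set()
    for subset in gather_subset:
        if subset == 'all':
            runable.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            name = subset[1:]
            if name == 'all':
                exclude.update(VALID_SUBSETS)
                continue
            if name not in VALID_SUBSETS:
                raise ValueError('unknown subset: %s' % name)
            exclude.add(name)
        else:
            if subset not in VALID_SUBSETS:
                raise ValueError('unknown subset: %s' % subset)
            runable.add(subset)
    if not runable:
        runable.update(VALID_SUBSETS)
    runable.difference_update(exclude)
    runable.add('default')        # the default facts are always collected
    return runable

# e.g. resolve_subsets(['!config']) -> set(['default', 'neighbors'])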
from __future__ import absolute_import from django import template from django.utils.unittest import TestCase from .templatetags import custom class CustomFilterTests(TestCase): def test_filter(self): t = template.Template("{% load custom %}{{ string|trim:5 }}") self.assertEqual( t.render(template.Context({"string": "abcdefghijklmnopqrstuvwxyz"})), u"abcde" ) class CustomTagTests(TestCase): def verify_tag(self, tag, name): self.assertEqual(tag.__name__, name) self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name) self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name) def test_simple_tags(self): c = template.Context({'value': 42}) t = template.Template('{% load custom %}{% no_params %}') self.assertEqual(t.render(c), u'no_params - Expected result') t = template.Template('{% load custom %}{% one_param 37 %}') self.assertEqual(t.render(c), u'one_param - Expected result: 37') t = template.Template('{% load custom %}{% explicit_no_context 37 %}') self.assertEqual(t.render(c), u'explicit_no_context - Expected result: 37') t = template.Template('{% load custom %}{% no_params_with_context %}') self.assertEqual(t.render(c), u'no_params_with_context - Expected result (context value: 42)') t = template.Template('{% load custom %}{% params_and_context 37 %}') self.assertEqual(t.render(c), u'params_and_context - Expected result (context value: 42): 37') t = template.Template('{% load custom %}{% simple_two_params 37 42 %}') self.assertEqual(t.render(c), u'simple_two_params - Expected result: 37, 42') t = template.Template('{% load custom %}{% simple_one_default 37 %}') self.assertEqual(t.render(c), u'simple_one_default - Expected result: 37, hi') t = template.Template('{% load custom %}{% simple_one_default 37 two="hello" %}') self.assertEqual(t.render(c), u'simple_one_default - Expected result: 37, hello') t = template.Template('{% load custom %}{% simple_one_default one=99 two="hello" %}') self.assertEqual(t.render(c), u'simple_one_default - Expected result: 99, hello') self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_one_default' received unexpected keyword argument 'three'", template.Template, '{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}') t = template.Template('{% load custom %}{% simple_one_default 37 42 %}') self.assertEqual(t.render(c), u'simple_one_default - Expected result: 37, 42') t = template.Template('{% load custom %}{% simple_unlimited_args 37 %}') self.assertEqual(t.render(c), u'simple_unlimited_args - Expected result: 37, hi') t = template.Template('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}') self.assertEqual(t.render(c), u'simple_unlimited_args - Expected result: 37, 42, 56, 89') t = template.Template('{% load custom %}{% simple_only_unlimited_args %}') self.assertEqual(t.render(c), u'simple_only_unlimited_args - Expected result: ') t = template.Template('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}') self.assertEqual(t.render(c), u'simple_only_unlimited_args - Expected result: 37, 42, 56, 89') self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_two_params' received too many positional arguments", template.Template, '{% load custom %}{% simple_two_params 37 42 56 %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_one_default' received too many positional arguments", template.Template, '{% load custom %}{% simple_one_default 37 42 56 %}') t = template.Template('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}') 
self.assertEqual(t.render(c), u'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4') self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_unlimited_args_kwargs' received some positional argument\(s\) after some keyword argument\(s\)", template.Template, '{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", template.Template, '{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}') def test_simple_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. self.verify_tag(custom.no_params, 'no_params') self.verify_tag(custom.one_param, 'one_param') self.verify_tag(custom.explicit_no_context, 'explicit_no_context') self.verify_tag(custom.no_params_with_context, 'no_params_with_context') self.verify_tag(custom.params_and_context, 'params_and_context') self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs') self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter') def test_simple_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True self.assertRaisesRegexp(template.TemplateSyntaxError, "'simple_tag_without_context_parameter' is decorated with takes_context=True so it must have a first argument of 'context'", template.Template, '{% load custom %}{% simple_tag_without_context_parameter 123 %}') def test_inclusion_tags(self): c = template.Context({'value': 42}) t = template.Template('{% load custom %}{% inclusion_no_params %}') self.assertEqual(t.render(c), u'inclusion_no_params - Expected result\n') t = template.Template('{% load custom %}{% inclusion_one_param 37 %}') self.assertEqual(t.render(c), u'inclusion_one_param - Expected result: 37\n') t = template.Template('{% load custom %}{% inclusion_explicit_no_context 37 %}') self.assertEqual(t.render(c), u'inclusion_explicit_no_context - Expected result: 37\n') t = template.Template('{% load custom %}{% inclusion_no_params_with_context %}') self.assertEqual(t.render(c), u'inclusion_no_params_with_context - Expected result (context value: 42)\n') t = template.Template('{% load custom %}{% inclusion_params_and_context 37 %}') self.assertEqual(t.render(c), u'inclusion_params_and_context - Expected result (context value: 42): 37\n') t = template.Template('{% load custom %}{% inclusion_two_params 37 42 %}') self.assertEqual(t.render(c), u'inclusion_two_params - Expected result: 37, 42\n') t = template.Template('{% load custom %}{% inclusion_one_default 37 %}') self.assertEqual(t.render(c), u'inclusion_one_default - Expected result: 37, hi\n') t = template.Template('{% load custom %}{% inclusion_one_default 37 two="hello" %}') self.assertEqual(t.render(c), u'inclusion_one_default - Expected result: 37, hello\n') t = template.Template('{% load custom %}{% inclusion_one_default one=99 two="hello" %}') self.assertEqual(t.render(c), u'inclusion_one_default - Expected result: 99, hello\n') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_one_default' received unexpected keyword argument 'three'", template.Template, '{% load custom %}{% inclusion_one_default 99 two="hello" three="foo" %}') t = template.Template('{% load custom %}{% inclusion_one_default 37 42 %}') self.assertEqual(t.render(c), 
u'inclusion_one_default - Expected result: 37, 42\n') t = template.Template('{% load custom %}{% inclusion_unlimited_args 37 %}') self.assertEqual(t.render(c), u'inclusion_unlimited_args - Expected result: 37, hi\n') t = template.Template('{% load custom %}{% inclusion_unlimited_args 37 42 56 89 %}') self.assertEqual(t.render(c), u'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n') t = template.Template('{% load custom %}{% inclusion_only_unlimited_args %}') self.assertEqual(t.render(c), u'inclusion_only_unlimited_args - Expected result: \n') t = template.Template('{% load custom %}{% inclusion_only_unlimited_args 37 42 56 89 %}') self.assertEqual(t.render(c), u'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_two_params' received too many positional arguments", template.Template, '{% load custom %}{% inclusion_two_params 37 42 56 %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_one_default' received too many positional arguments", template.Template, '{% load custom %}{% inclusion_one_default 37 42 56 %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_one_default' did not receive value\(s\) for the argument\(s\): 'one'", template.Template, '{% load custom %}{% inclusion_one_default %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_unlimited_args' did not receive value\(s\) for the argument\(s\): 'one'", template.Template, '{% load custom %}{% inclusion_unlimited_args %}') t = template.Template('{% load custom %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}') self.assertEqual(t.render(c), u'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_unlimited_args_kwargs' received some positional argument\(s\) after some keyword argument\(s\)", template.Template, '{% load custom %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", template.Template, '{% load custom %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}') def test_include_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True self.assertRaisesRegexp(template.TemplateSyntaxError, "'inclusion_tag_without_context_parameter' is decorated with takes_context=True so it must have a first argument of 'context'", template.Template, '{% load custom %}{% inclusion_tag_without_context_parameter 123 %}') def test_inclusion_tags_from_template(self): c = template.Context({'value': 42}) t = template.Template('{% load custom %}{% inclusion_no_params_from_template %}') self.assertEqual(t.render(c), u'inclusion_no_params_from_template - Expected result\n') t = template.Template('{% load custom %}{% inclusion_one_param_from_template 37 %}') self.assertEqual(t.render(c), u'inclusion_one_param_from_template - Expected result: 37\n') t = template.Template('{% load custom %}{% inclusion_explicit_no_context_from_template 37 %}') self.assertEqual(t.render(c), u'inclusion_explicit_no_context_from_template - Expected result: 37\n') t = template.Template('{% load custom %}{% inclusion_no_params_with_context_from_template %}') self.assertEqual(t.render(c), u'inclusion_no_params_with_context_from_template - Expected result 
(context value: 42)\n') t = template.Template('{% load custom %}{% inclusion_params_and_context_from_template 37 %}') self.assertEqual(t.render(c), u'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n') t = template.Template('{% load custom %}{% inclusion_two_params_from_template 37 42 %}') self.assertEqual(t.render(c), u'inclusion_two_params_from_template - Expected result: 37, 42\n') t = template.Template('{% load custom %}{% inclusion_one_default_from_template 37 %}') self.assertEqual(t.render(c), u'inclusion_one_default_from_template - Expected result: 37, hi\n') t = template.Template('{% load custom %}{% inclusion_one_default_from_template 37 42 %}') self.assertEqual(t.render(c), u'inclusion_one_default_from_template - Expected result: 37, 42\n') t = template.Template('{% load custom %}{% inclusion_unlimited_args_from_template 37 %}') self.assertEqual(t.render(c), u'inclusion_unlimited_args_from_template - Expected result: 37, hi\n') t = template.Template('{% load custom %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}') self.assertEqual(t.render(c), u'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n') t = template.Template('{% load custom %}{% inclusion_only_unlimited_args_from_template %}') self.assertEqual(t.render(c), u'inclusion_only_unlimited_args_from_template - Expected result: \n') t = template.Template('{% load custom %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}') self.assertEqual(t.render(c), u'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n') def test_inclusion_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. self.verify_tag(custom.inclusion_no_params, 'inclusion_no_params') self.verify_tag(custom.inclusion_one_param, 'inclusion_one_param') self.verify_tag(custom.inclusion_explicit_no_context, 'inclusion_explicit_no_context') self.verify_tag(custom.inclusion_no_params_with_context, 'inclusion_no_params_with_context') self.verify_tag(custom.inclusion_params_and_context, 'inclusion_params_and_context') self.verify_tag(custom.inclusion_two_params, 'inclusion_two_params') self.verify_tag(custom.inclusion_one_default, 'inclusion_one_default') self.verify_tag(custom.inclusion_unlimited_args, 'inclusion_unlimited_args') self.verify_tag(custom.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args') self.verify_tag(custom.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter') self.verify_tag(custom.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n') self.verify_tag(custom.inclusion_tag_current_app, 'inclusion_tag_current_app') self.verify_tag(custom.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs') def test_15070_current_app(self): """ Test that inclusion tag passes down `current_app` of context to the Context of the included/rendered template as well. """ c = template.Context({}) t = template.Template('{% load custom %}{% inclusion_tag_current_app %}') self.assertEqual(t.render(c).strip(), u'None') c.current_app = 'advanced' self.assertEqual(t.render(c).strip(), u'advanced') def test_15070_use_l10n(self): """ Test that inclusion tag passes down `use_l10n` of context to the Context of the included/rendered template as well. 
""" c = template.Context({}) t = template.Template('{% load custom %}{% inclusion_tag_use_l10n %}') self.assertEqual(t.render(c).strip(), u'None') c.use_l10n = True self.assertEqual(t.render(c).strip(), u'True') def test_assignment_tags(self): c = template.Context({'value': 42}) t = template.Template('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_no_params - Expected result') t = template.Template('{% load custom %}{% assignment_one_param 37 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_one_param - Expected result: 37') t = template.Template('{% load custom %}{% assignment_explicit_no_context 37 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_explicit_no_context - Expected result: 37') t = template.Template('{% load custom %}{% assignment_no_params_with_context as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_no_params_with_context - Expected result (context value: 42)') t = template.Template('{% load custom %}{% assignment_params_and_context 37 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_params_and_context - Expected result (context value: 42): 37') t = template.Template('{% load custom %}{% assignment_two_params 37 42 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_two_params - Expected result: 37, 42') t = template.Template('{% load custom %}{% assignment_one_default 37 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_one_default - Expected result: 37, hi') t = template.Template('{% load custom %}{% assignment_one_default 37 two="hello" as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_one_default - Expected result: 37, hello') t = template.Template('{% load custom %}{% assignment_one_default one=99 two="hello" as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_one_default - Expected result: 99, hello') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_default' received unexpected keyword argument 'three'", template.Template, '{% load custom %}{% assignment_one_default 99 two="hello" three="foo" as var %}') t = template.Template('{% load custom %}{% assignment_one_default 37 42 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_one_default - Expected result: 37, 42') t = template.Template('{% load custom %}{% assignment_unlimited_args 37 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_unlimited_args - Expected result: 37, hi') t = template.Template('{% load custom %}{% assignment_unlimited_args 37 42 56 89 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_unlimited_args - Expected result: 37, 42, 56, 89') t = template.Template('{% load custom %}{% assignment_only_unlimited_args as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_only_unlimited_args - Expected result: ') t = template.Template('{% load custom %}{% assignment_only_unlimited_args 37 42 56 89 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_only_unlimited_args - Expected result: 37, 42, 56, 89') 
self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_param' tag takes at least 2 arguments and the second last argument must be 'as'", template.Template, '{% load custom %}{% assignment_one_param 37 %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_param' tag takes at least 2 arguments and the second last argument must be 'as'", template.Template, '{% load custom %}{% assignment_one_param 37 as %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_param' tag takes at least 2 arguments and the second last argument must be 'as'", template.Template, '{% load custom %}{% assignment_one_param 37 ass var %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_two_params' received too many positional arguments", template.Template, '{% load custom %}{% assignment_two_params 37 42 56 as var %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_default' received too many positional arguments", template.Template, '{% load custom %}{% assignment_one_default 37 42 56 as var %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_one_default' did not receive value\(s\) for the argument\(s\): 'one'", template.Template, '{% load custom %}{% assignment_one_default as var %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_unlimited_args' did not receive value\(s\) for the argument\(s\): 'one'", template.Template, '{% load custom %}{% assignment_unlimited_args as var %}The result is: {{ var }}') t = template.Template('{% load custom %}{% assignment_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 as var %}The result is: {{ var }}') self.assertEqual(t.render(c), u'The result is: assignment_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_unlimited_args_kwargs' received some positional argument\(s\) after some keyword argument\(s\)", template.Template, '{% load custom %}{% assignment_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 as var %}The result is: {{ var }}') self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", template.Template, '{% load custom %}{% assignment_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" as var %}The result is: {{ var }}') def test_assignment_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. 
self.verify_tag(custom.assignment_no_params, 'assignment_no_params') self.verify_tag(custom.assignment_one_param, 'assignment_one_param') self.verify_tag(custom.assignment_explicit_no_context, 'assignment_explicit_no_context') self.verify_tag(custom.assignment_no_params_with_context, 'assignment_no_params_with_context') self.verify_tag(custom.assignment_params_and_context, 'assignment_params_and_context') self.verify_tag(custom.assignment_one_default, 'assignment_one_default') self.verify_tag(custom.assignment_two_params, 'assignment_two_params') self.verify_tag(custom.assignment_unlimited_args, 'assignment_unlimited_args') self.verify_tag(custom.assignment_only_unlimited_args, 'assignment_only_unlimited_args') self.verify_tag(custom.assignment_unlimited_args, 'assignment_unlimited_args') self.verify_tag(custom.assignment_unlimited_args_kwargs, 'assignment_unlimited_args_kwargs') self.verify_tag(custom.assignment_tag_without_context_parameter, 'assignment_tag_without_context_parameter') def test_assignment_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True self.assertRaisesRegexp(template.TemplateSyntaxError, "'assignment_tag_without_context_parameter' is decorated with takes_context=True so it must have a first argument of 'context'", template.Template, '{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
LethusTI/supportcenter
vendor/django/tests/regressiontests/templates/custom.py
Python
gpl-3.0
24,013
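The tests above exercise simple, inclusion, and assignment tags, including the rule that a tag decorated with takes_context=True must name its first parameter 'context'. A minimal sketch of registering such a tag in a templatetags module, assuming Django of the same era; the tag name and template snippet are made up, not taken from the test suite.

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def greet(context, name, greeting="hi"):
    # With takes_context=True the first parameter must be called 'context';
    # otherwise Django raises the TemplateSyntaxError the tests assert on.
    return "%s %s (value: %s)" % (greeting, name, context.get("value"))

# In a template, after {% load <library> %}:
#   {% greet "world" greeting="hello" %}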
#! /usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Generate artificial datasets for the multi-step prediction experiments """ import os import random import datetime from optparse import OptionParser from nupic.data.file_record_stream import FileRecordStream def _generateSimple(filename="simple.csv", numSequences=1, elementsPerSeq=3, numRepeats=10): """ Generate a simple dataset. This contains a bunch of non-overlapping sequences. At the end of the dataset, we introduce missing records so that test code can insure that the model didn't get confused by them. Parameters: ---------------------------------------------------- filename: name of the file to produce, including extension. It will be created in a 'datasets' sub-directory within the directory containing this script. numSequences: how many sequences to generate elementsPerSeq: length of each sequence numRepeats: how many times to repeat each sequence in the output """ # Create the output file scriptDir = os.path.dirname(__file__) pathname = os.path.join(scriptDir, 'datasets', filename) print "Creating %s..." 
% (pathname) fields = [('timestamp', 'datetime', 'T'), ('field1', 'string', ''), ('field2', 'float', '')] outFile = FileRecordStream(pathname, write=True, fields=fields) # Create the sequences sequences = [] for i in range(numSequences): seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)] sequences.append(seq) # Write out the sequences in random order seqIdxs = [] for i in range(numRepeats): seqIdxs += range(numSequences) random.shuffle(seqIdxs) # Put 1 hour between each record timestamp = datetime.datetime(year=2012, month=1, day=1, hour=0, minute=0, second=0) timeDelta = datetime.timedelta(hours=1) # Write out the sequences without missing records for seqIdx in seqIdxs: seq = sequences[seqIdx] for x in seq: outFile.appendRecord([timestamp, str(x), x]) timestamp += timeDelta # Now, write some out with missing records for seqIdx in seqIdxs: seq = sequences[seqIdx] for i,x in enumerate(seq): if i != 1: outFile.appendRecord([timestamp, str(x), x]) timestamp += timeDelta for seqIdx in seqIdxs: seq = sequences[seqIdx] for i,x in enumerate(seq): if i != 1: outFile.appendRecord([timestamp, str(x), x]) timestamp += timeDelta # Write out some more of the sequences *without* missing records for seqIdx in seqIdxs: seq = sequences[seqIdx] for x in seq: outFile.appendRecord([timestamp, str(x), x]) timestamp += timeDelta outFile.close() if __name__ == '__main__': helpString = \ """%prog [options] Generate artificial datasets for testing multi-step prediction """ # ============================================================================ # Process command line arguments parser = OptionParser(helpString) parser.add_option("--verbosity", default=0, type="int", help="Verbosity level, either 0, 1, 2, or 3 [default: %default].") (options, args) = parser.parse_args() if len(args) != 0: parser.error("No arguments accepted") # Set random seed random.seed(42) # Create the dataset directory if necessary datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets') if not os.path.exists(datasetsDir): os.mkdir(datasetsDir) # Generate the sample datasets _generateSimple('simple_0.csv', numSequences=1, elementsPerSeq=3, numRepeats=10)
cngo-github/nupic
examples/opf/experiments/missing_record/make_datasets.py
Python
agpl-3.0
4,661
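The _generateSimple() helper above documents its parameters but is only invoked from the script's __main__ block; a minimal sketch of calling it directly, assuming NuPIC is installed, the script is importable as make_datasets, and a 'datasets' sub-directory already exists next to it:

import random
from make_datasets import _generateSimple

random.seed(42)  # reproducible shuffle of the sequence order
_generateSimple('simple_two_seq.csv', numSequences=2, elementsPerSeq=3,
                numRepeats=5)  # writes datasets/simple_two_seq.csv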
__author__ = 'Tom Schaul, [email protected]' from pybrain.rl.learners.learner import Learner class MetaLearner(Learner): """ Learners that make use of other Learners, or learn how to learn. """
hassaanm/stock-trading
src/pybrain/rl/learners/meta/meta.py
Python
apache-2.0
196
# Unix SMB/CIFS implementation. # Copyright (C) Sean Dague <[email protected]> 2011 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This provides a wrapper around the cmd interface so that tests can # easily be built on top of it and have minimal code to run basic tests # of the commands. A list of the environmental variables can be found in # ~/selftest/selftest.pl # # These can all be accesses via os.environ["VARIBLENAME"] when needed import random import string from samba.auth import system_session from samba.samdb import SamDB from cStringIO import StringIO from samba.netcmd.main import cmd_sambatool import samba.tests class SambaToolCmdTest(samba.tests.TestCaseInTempDir): def getSamDB(self, *argv): """a convenience function to get a samdb instance so that we can query it""" # We build a fake command to get the options created the same # way the command classes do it. It would be better if the command # classes had a way to more cleanly do this, but this lets us write # tests for now cmd = cmd_sambatool.subcommands["user"].subcommands["setexpiry"] parser, optiongroups = cmd._create_parser("user") opts, args = parser.parse_args(list(argv)) # Filter out options from option groups args = args[1:] kwargs = dict(opts.__dict__) for option_group in parser.option_groups: for option in option_group.option_list: if option.dest is not None: del kwargs[option.dest] kwargs.update(optiongroups) H = kwargs.get("H", None) sambaopts = kwargs.get("sambaopts", None) credopts = kwargs.get("credopts", None) lp = sambaopts.get_loadparm() creds = credopts.get_credentials(lp, fallback_machine=True) samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp) return samdb def runcmd(self, name, *args): """run a single level command""" cmd = cmd_sambatool.subcommands[name] cmd.outf = StringIO() cmd.errf = StringIO() result = cmd._run(name, *args) return (result, cmd.outf.getvalue(), cmd.errf.getvalue()) def runsubcmd(self, name, sub, *args): """run a command with sub commands""" # The reason we need this function separate from runcmd is # that the .outf StringIO assignment is overriden if we use # runcmd, so we can't capture stdout and stderr cmd = cmd_sambatool.subcommands[name].subcommands[sub] cmd.outf = StringIO() cmd.errf = StringIO() result = cmd._run(name, *args) return (result, cmd.outf.getvalue(), cmd.errf.getvalue()) def assertCmdSuccess(self, val, msg=""): self.assertIsNone(val, msg) def assertCmdFail(self, val, msg=""): self.assertIsNotNone(val, msg) def assertMatch(self, base, string, msg=""): self.assertTrue(string in base, msg) def randomName(self, count=8): """Create a random name, cap letters and numbers, and always starting with a letter""" name = random.choice(string.ascii_uppercase) name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 1)) return name def randomPass(self, count=16): name = random.choice(string.ascii_uppercase) name += 
random.choice(string.digits) name += random.choice(string.ascii_lowercase) name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase+ string.digits) for x in range(count - 3)) return name def randomXid(self): # pick some hopefully unused, high UID/GID range to avoid interference # from the system the test runs on xid = random.randint(4711000, 4799000) return xid def assertWithin(self, val1, val2, delta, msg=""): """Assert that val1 is within delta of val2, useful for time computations""" self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
yasoob/PythonRSSReader
venv/lib/python2.7/dist-packages/samba/tests/samba_tool/base.py
Python
mit
4,702
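SambaToolCmdTest above wraps samba-tool subcommands so tests can run them in-process and capture their output; a minimal sketch of a test built on it (hypothetical test case, assuming the selftest environment described in the module comments is available):

from samba.tests.samba_tool.base import SambaToolCmdTest

class UserListCmdSketch(SambaToolCmdTest):
    def test_user_list_runs(self):
        # runsubcmd captures stdout/stderr and returns the command result.
        (result, out, err) = self.runsubcmd("user", "list")
        self.assertCmdSuccess(result, "samba-tool user list failed: %s" % err)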
""" This module collects helper functions and classes that "span" multiple levels of MVC. In other words, these functions/classes introduce controlled coupling for convenience's sake. """ from django.http import ( Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect, ) from django.template import loader from django.urls import NoReverseMatch, reverse from django.utils.functional import Promise def render(request, template_name, context=None, content_type=None, status=None, using=None): """ Return a HttpResponse whose content is filled with the result of calling django.template.loader.render_to_string() with the passed arguments. """ content = loader.render_to_string(template_name, context, request, using=using) return HttpResponse(content, content_type, status) def redirect(to, *args, permanent=False, **kwargs): """ Return an HttpResponseRedirect to the appropriate URL for the arguments passed. The arguments could be: * A model: the model's `get_absolute_url()` function will be called. * A view name, possibly with arguments: `urls.reverse()` will be used to reverse-resolve the name. * A URL, which will be used as-is for the redirect location. Issues a temporary redirect by default; pass permanent=True to issue a permanent redirect. """ redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect return redirect_class(resolve_url(to, *args, **kwargs)) def _get_queryset(klass): """ Return a QuerySet or a Manager. Duck typing in action: any class with a `get()` method (for get_object_or_404) or a `filter()` method (for get_list_or_404) might do the job. """ # If it is a model class or anything else with ._default_manager if hasattr(klass, '_default_manager'): return klass._default_manager.all() return klass def get_object_or_404(klass, *args, **kwargs): """ Use get() to return an object, or raise a Http404 exception if the object does not exist. klass may be a Model, Manager, or QuerySet object. All other passed arguments and keyword arguments are used in the get() query. Like with QuerySet.get(), MultipleObjectsReturned is raised if more than one object is found. """ queryset = _get_queryset(klass) if not hasattr(queryset, 'get'): klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__ raise ValueError( "First argument to get_object_or_404() must be a Model, Manager, " "or QuerySet, not '%s'." % klass__name ) try: return queryset.get(*args, **kwargs) except queryset.model.DoesNotExist: raise Http404('No %s matches the given query.' % queryset.model._meta.object_name) def get_list_or_404(klass, *args, **kwargs): """ Use filter() to return a list of objects, or raise a Http404 exception if the list is empty. klass may be a Model, Manager, or QuerySet object. All other passed arguments and keyword arguments are used in the filter() query. """ queryset = _get_queryset(klass) if not hasattr(queryset, 'filter'): klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__ raise ValueError( "First argument to get_list_or_404() must be a Model, Manager, or " "QuerySet, not '%s'." % klass__name ) obj_list = list(queryset.filter(*args, **kwargs)) if not obj_list: raise Http404('No %s matches the given query.' % queryset.model._meta.object_name) return obj_list def resolve_url(to, *args, **kwargs): """ Return a URL appropriate for the arguments passed. The arguments could be: * A model: the model's `get_absolute_url()` function will be called. 
* A view name, possibly with arguments: `urls.reverse()` will be used to reverse-resolve the name. * A URL, which will be returned as-is. """ # If it's a model, use get_absolute_url() if hasattr(to, 'get_absolute_url'): return to.get_absolute_url() if isinstance(to, Promise): # Expand the lazy instance, as it can cause issues when it is passed # further to some Python functions like urlparse. to = str(to) if isinstance(to, str): # Handle relative URLs if to.startswith(('./', '../')): return to # Next try a reverse URL resolution. try: return reverse(to, args=args, kwargs=kwargs) except NoReverseMatch: # If this is a callable, re-raise. if callable(to): raise # If this doesn't "feel" like a URL, re-raise. if '/' not in to and '.' not in to: raise # Finally, fall back and assume it's a URL return to
georgemarshall/django
django/shortcuts.py
Python
bsd-3-clause
4,896
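The shortcuts above are typically combined in view code; a small illustrative sketch follows (Article, the template name, and the import path are assumptions, not part of django/shortcuts.py):

from django.shortcuts import get_object_or_404, redirect, render
from myapp.models import Article   # hypothetical model with get_absolute_url()

def article_detail(request, pk):
    article = get_object_or_404(Article, pk=pk)   # raises Http404 if no match
    return render(request, 'articles/detail.html', {'article': article})

def latest_article(request):
    latest = Article.objects.latest('pk')
    return redirect(latest)   # resolve_url() calls latest.get_absolute_url()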
import json import logging from lxml import etree from xmodule.capa_module import ComplexEncoder from xmodule.progress import Progress from xmodule.stringify import stringify_children import openendedchild from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("edx.courseware") class SelfAssessmentModule(openendedchild.OpenEndedChild): """ A Self Assessment module that allows students to write open-ended responses, submit, then see a rubric and rate themselves. Persists student supplied hints, answers, and assessment judgment (currently only correct/incorrect). Parses xml definition file--see below for exact format. Sample XML format: <selfassessment> <hintprompt> What hint about this problem would you give to someone? </hintprompt> <submitmessage> Save Succcesful. Thanks for participating! </submitmessage> </selfassessment> """ TEMPLATE_DIR = "combinedopenended/selfassessment" # states INITIAL = 'initial' ASSESSING = 'assessing' REQUEST_HINT = 'request_hint' DONE = 'done' def setup_response(self, system, location, definition, descriptor): """ Sets up the module @param system: Modulesystem @param location: location, to let the module know where it is. @param definition: XML definition of the module. @param descriptor: SelfAssessmentDescriptor @return: None """ self.child_prompt = stringify_children(self.child_prompt) self.child_rubric = stringify_children(self.child_rubric) def get_html(self, system): """ Gets context and renders HTML that represents the module @param system: Modulesystem @return: Rendered HTML """ # set context variables and render template previous_answer = self.get_display_answer() # Use the module name as a unique id to pass to the template. try: module_id = self.system.location.name except AttributeError: # In cases where we don't have a system or a location, use a fallback. module_id = "self_assessment" context = { 'prompt': self.child_prompt, 'previous_answer': previous_answer, 'ajax_url': system.ajax_url, 'initial_rubric': self.get_rubric_html(system), 'state': self.child_state, 'allow_reset': self._allow_reset(), 'child_type': 'selfassessment', 'accept_file_upload': self.accept_file_upload, 'module_id': module_id, } html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context) return html def handle_ajax(self, dispatch, data, system): """ This is called by courseware.module_render, to handle an AJAX call. "data" is request.POST. Returns a json dictionary: { 'progress_changed' : True/False, 'progress': 'none'/'in_progress'/'done', <other request-specific values here > } """ handlers = { 'save_answer': self.save_answer, 'save_assessment': self.save_assessment, 'save_post_assessment': self.save_hint, 'store_answer': self.store_answer, } if dispatch not in handlers: # This is a dev_facing_error log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) # This is a dev_facing_error return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) before = self.get_progress() d = handlers[dispatch](data, system) after = self.get_progress() d.update({ 'progress_changed': after != before, 'progress_status': Progress.to_js_status_str(after), }) return json.dumps(d, cls=ComplexEncoder) def get_rubric_html(self, system): """ Return the appropriate version of the rubric, based on the state. 
""" if self.child_state == self.INITIAL: return '' rubric_renderer = CombinedOpenEndedRubric(system, False) rubric_dict = rubric_renderer.render_rubric(self.child_rubric) success = rubric_dict['success'] rubric_html = rubric_dict['html'] # we'll render it context = { 'rubric': rubric_html, 'max_score': self._max_score, } if self.child_state == self.ASSESSING: context['read_only'] = False elif self.child_state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: # This is a dev_facing_error raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. """ if self.child_state in (self.INITIAL, self.ASSESSING): return '' if self.child_state == self.DONE: # display the previous hint latest = self.latest_post_assessment(system) hint = latest if latest is not None else '' else: hint = '' context = {'hint': hint} if self.child_state == self.POST_ASSESSMENT: context['read_only'] = False elif self.child_state == self.DONE: context['read_only'] = True else: # This is a dev_facing_error raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) def save_answer(self, data, system): """ After the answer is submitted, show the rubric. Args: data: the request dictionary passed to the ajax request. Should contain a key 'student_answer' Returns: Dictionary with keys 'success' and either 'error' (if not success), or 'rubric_html' (if success). """ # Check to see if this problem is closed closed, msg = self.check_if_closed() if closed: return msg if self.child_state != self.INITIAL: return self.out_of_sync_error(data) error_message = "" # add new history element with answer and empty score and hint. success, error_message, data = self.append_file_link_to_student_answer(data) if success: data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer']) self.new_history_entry(data['student_answer']) self.change_state(self.ASSESSING) return { 'success': success, 'rubric_html': self.get_rubric_html(system), 'error': error_message, 'student_response': data['student_answer'].replace("\n","<br/>") } def save_assessment(self, data, _system): """ Save the assessment. If the student said they're right, don't ask for a hint, and go straight to the done state. Otherwise, do ask for a hint. 
Returns a dict { 'success': bool, 'state': state, 'hint_html': hint_html OR 'message_html': html and 'allow_reset', 'error': error-msg}, with 'error' only present if 'success' is False, and 'hint_html' or 'message_html' only if success is true :param data: A `webob.multidict.MultiDict` containing the keys asasssment: The sum of assessment scores score_list[]: A multivalue key containing all the individual scores """ closed, msg = self.check_if_closed() if closed: return msg if self.child_state != self.ASSESSING: return self.out_of_sync_error(data) try: score = int(data.get('assessment')) score_list = [int(x) for x in data.getall('score_list[]')] except (ValueError, TypeError): # This is a dev_facing_error log.error("Non-integer score value passed to save_assessment, or no score list present.") # This is a student_facing_error _ = self.system.service(self, "i18n").ugettext return { 'success': False, 'error': _("Error saving your score. Please notify course staff.") } # Record score as assessment and rubric scores as post assessment self.record_latest_score(score) self.record_latest_post_assessment(json.dumps(score_list)) d = {'success': True, } self.change_state(self.DONE) d['allow_reset'] = self._allow_reset() d['state'] = self.child_state return d def save_hint(self, data, _system): ''' Not used currently, as hints have been removed from the system. Save the hint. Returns a dict { 'success': bool, 'message_html': message_html, 'error': error-msg, 'allow_reset': bool}, with the error key only present if success is False and message_html only if True. ''' if self.child_state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. return self.out_of_sync_error(data) self.record_latest_post_assessment(data['hint']) self.change_state(self.DONE) return { 'success': True, 'message_html': '', 'allow_reset': self._allow_reset(), } def latest_post_assessment(self, system): latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) try: rubric_scores = json.loads(latest_post_assessment) except: rubric_scores = [] return [rubric_scores] class SelfAssessmentDescriptor(): """ Module for adding self assessment questions to courses """ mako_template = "widgets/html-edit.html" module_class = SelfAssessmentModule filename_extension = "xml" has_score = True def __init__(self, system): self.system = system @classmethod def definition_from_xml(cls, xml_object, system): """ Pull out the rubric, prompt, and submitmessage into a dictionary. Returns: { 'submitmessage': 'some-html' 'hintprompt': 'some-html' } """ expected_children = [] for child in expected_children: if len(xml_object.xpath(child)) != 1: # This is a staff_facing_error raise ValueError( u"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( child)) def parse(k): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) return {} def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' elt = etree.Element('selfassessment') def add_child(k): child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=getattr(self, k)) child_node = etree.fromstring(child_str) elt.append(child_node) for child in []: add_child(child) return elt
carsongee/edx-platform
common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
Python
agpl-3.0
11,892
# ============================================================================= # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Custom op used by periodic_resample.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.periodic_resample.python.ops.periodic_resample_op import periodic_resample from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = ["periodic_resample"] remove_undocumented(__name__, _allowed_symbols)
drpngx/tensorflow
tensorflow/contrib/periodic_resample/__init__.py
Python
apache-2.0
1,176
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class HarkIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hark\.com/clips/(?P<id>.+?)-.+' _TEST = { 'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013', 'md5': '6783a58491b47b92c7c1af5a77d4cbee', 'info_dict': { 'id': 'mmbzyhkgny', 'ext': 'mp3', 'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013', 'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.', 'duration': 11, } } def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( 'http://www.hark.com/clips/%s.json' % video_id, video_id) return { 'id': video_id, 'url': data['url'], 'title': data['name'], 'description': data.get('description'), 'thumbnail': data.get('image_original'), 'duration': data.get('duration'), }
Tithen-Firion/youtube-dl
youtube_dl/extractor/hark.py
Python
unlicense
1,341
# (c) 2018, NetApp Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args __metaclass__ = type from units.compat import mock class AuditLogTests(ModuleTestCase): REQUIRED_PARAMS = {'api_username': 'rw', 'api_password': 'password', 'api_url': 'http://localhost', 'ssid': '1'} REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request' MAX_RECORDS_MAXIMUM = 50000 MAX_RECORDS_MINIMUM = 100 def _set_args(self, **kwargs): module_args = self.REQUIRED_PARAMS.copy() if kwargs is not None: module_args.update(kwargs) set_module_args(module_args) def test_max_records_argument_pass(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM) for max_records in max_records_set: initial["max_records"] = max_records self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): audit_log = AuditLog() self.assertTrue(audit_log.max_records == max_records) def test_max_records_argument_fail(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1) for max_records in max_records_set: with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"): initial["max_records"] = max_records self._set_args(**initial) AuditLog() def test_threshold_argument_pass(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} threshold_set = (60, 75, 90) for threshold in threshold_set: initial["threshold"] = threshold self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): audit_log = AuditLog() self.assertTrue(audit_log.threshold == threshold) def test_threshold_argument_fail(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} threshold_set = (59, 91) for threshold in threshold_set: with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"): initial["threshold"] = threshold self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): AuditLog() def test_is_proxy_pass(self): """Verify that True is returned when proxy is used to communicate with storage.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "api_url": "https://10.1.1.10/devmgr/v2"} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): self.assertTrue(audit_log.is_proxy()) def test_is_proxy_fail(self): """Verify that AnsibleJsonFail exception is thrown when exception occurs.""" initial = {"max_records": 1000, "log_level": "writeOnly", 
"full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.is_proxy() def test_get_configuration_pass(self): """Validate get configuration does not throw exception when normal request is returned.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} expected = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, expected)): body = audit_log.get_configuration() self.assertTrue(body == expected) def test_get_configuration_fail(self): """Verify AnsibleJsonFail exception is thrown.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.get_configuration() def test_build_configuration_pass(self): """Validate configuration changes will force an update.""" response = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} changes = [{"max_records": 50000}, {"log_level": "all"}, {"full_policy": "preventSystemAccess"}, {"threshold": 75}] for change in changes: initial_with_changes = initial.copy() initial_with_changes.update(change) self._set_args(**initial_with_changes) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, response)): update = audit_log.build_configuration() self.assertTrue(update) def test_delete_log_messages_fail(self): """Verify AnsibleJsonFail exception is thrown.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.delete_log_messages() def test_update_configuration_delete_pass(self): """Verify 422 and force successfully returns True.""" body = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 2000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "force": True} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, side_effect=[(200, body), (422, {u"invalidFieldsIfKnown": None, u"errorMessage": u"Configuration change...", u"localizedMessage": u"Configuration change...", u"retcode": u"auditLogImmediateFullCondition", u"codeType": 
u"devicemgrerror"}), (200, None), (200, None)]): self.assertTrue(audit_log.update_configuration()) def test_update_configuration_delete_skip_fail(self): """Verify 422 and no force results in AnsibleJsonFail exception.""" body = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 2000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "force": False} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"): with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}), (200, None), (200, None)]): audit_log.update_configuration()
alxgu/ansible
test/units/modules/storage/netapp/test_netapp_e_auditlog.py
Python
gpl-3.0
10,758
"""Utility to compare (Numpy) version strings. The NumpyVersion class allows properly comparing numpy version strings. The LooseVersion and StrictVersion classes that distutils provides don't work; they don't recognize anything like alpha/beta/rc/dev versions. """ import re from scipy._lib.six import string_types __all__ = ['NumpyVersion'] class NumpyVersion(): """Parse and compare numpy version strings. Numpy has the following versioning scheme (numbers given are examples; they can be >9) in principle): - Released version: '1.8.0', '1.8.1', etc. - Alpha: '1.8.0a1', '1.8.0a2', etc. - Beta: '1.8.0b1', '1.8.0b2', etc. - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other `NumpyVersion` instance. Parameters ---------- vstring : str Numpy version string (``np.__version__``). Notes ----- All dev versions of the same (pre-)release compare equal. Examples -------- >>> from scipy._lib._version import NumpyVersion >>> if NumpyVersion(np.__version__) < '1.7.0': ... print('skip') skip >>> NumpyVersion('1.7') # raises ValueError, add ".0" """ def __init__(self, vstring): self.vstring = vstring ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") self.version = ver_main.group() self.major, self.minor, self.bugfix = [int(x) for x in self.version.split('.')] if len(vstring) == ver_main.end(): self.pre_release = 'final' else: alpha = re.match(r'a\d', vstring[ver_main.end():]) beta = re.match(r'b\d', vstring[ver_main.end():]) rc = re.match(r'rc\d', vstring[ver_main.end():]) pre_rel = [m for m in [alpha, beta, rc] if m is not None] if pre_rel: self.pre_release = pre_rel[0].group() else: self.pre_release = '' self.is_devversion = bool(re.search(r'.dev', vstring)) def _compare_version(self, other): """Compare major.minor.bugfix""" if self.major == other.major: if self.minor == other.minor: if self.bugfix == other.bugfix: vercmp = 0 elif self.bugfix > other.bugfix: vercmp = 1 else: vercmp = -1 elif self.minor > other.minor: vercmp = 1 else: vercmp = -1 elif self.major > other.major: vercmp = 1 else: vercmp = -1 return vercmp def _compare_pre_release(self, other): """Compare alpha/beta/rc/final.""" if self.pre_release == other.pre_release: vercmp = 0 elif self.pre_release == 'final': vercmp = 1 elif other.pre_release == 'final': vercmp = -1 elif self.pre_release > other.pre_release: vercmp = 1 else: vercmp = -1 return vercmp def _compare(self, other): if not isinstance(other, (string_types, NumpyVersion)): raise ValueError("Invalid object to compare with NumpyVersion.") if isinstance(other, string_types): other = NumpyVersion(other) vercmp = self._compare_version(other) if vercmp == 0: # Same x.y.z version, check for alpha/beta/rc vercmp = self._compare_pre_release(other) if vercmp == 0: # Same version and same pre-release, check if dev version if self.is_devversion is other.is_devversion: vercmp = 0 elif self.is_devversion: vercmp = -1 else: vercmp = 1 return vercmp def __lt__(self, other): return self._compare(other) < 0 def __le__(self, other): return self._compare(other) <= 0 def __eq__(self, other): return self._compare(other) == 0 def __ne__(self, other): return self._compare(other) != 0 def __gt__(self, other): return 
self._compare(other) > 0 def __ge__(self, other): return self._compare(other) >= 0 def __repr__(self): return "NumpyVersion(%s)" % self.vstring
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/_lib/_version.py
Python
mit
4,793
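A minimal usage sketch for NumpyVersion, mirroring the docstring examples above (assumes numpy and scipy are importable):

import numpy as np
from scipy._lib._version import NumpyVersion

if NumpyVersion(np.__version__) < '1.10.0':
    print('running on a pre-1.10 numpy')

# alpha < beta < final for the same x.y.z version
assert NumpyVersion('1.8.0a2') < NumpyVersion('1.8.0b1') < NumpyVersion('1.8.0')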
def foo(a): pass foo(1)
akosyakov/intellij-community
python/testData/quickFixes/PyRemoveArgumentQuickFixTest/unexpected_after.py
Python
apache-2.0
32
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urllib_request class VideoMegaIE(InfoExtractor): _VALID_URL = r'(?:videomega:|https?://(?:www\.)?videomega\.tv/(?:(?:view|iframe|cdn)\.php)?\?ref=)(?P<id>[A-Za-z0-9]+)' _TESTS = [{ 'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA', 'md5': 'cc1920a58add3f05c6a93285b84fb3aa', 'info_dict': { 'id': 'AOSQBJYKIDDIKYJBQSOA', 'ext': 'mp4', 'title': '1254207', 'thumbnail': 're:^https?://.*\.jpg$', } }, { 'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA&width=1070&height=600', 'only_matching': True, }, { 'url': 'http://videomega.tv/view.php?ref=090051111052065112106089103052052103089106112065052111051090', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id req = compat_urllib_request.Request(iframe_url) req.add_header('Referer', url) req.add_header('Cookie', 'noadvtday=0') webpage = self._download_webpage(req, video_id) title = self._html_search_regex( r'<title>(.+?)</title>', webpage, 'title') title = re.sub( r'(?:^[Vv]ideo[Mm]ega\.tv\s-\s*|\s*-\svideomega\.tv$)', '', title) thumbnail = self._search_regex( r'<video[^>]+?poster="([^"]+)"', webpage, 'thumbnail', fatal=False) video_url = self._search_regex( r'<source[^>]+?src="([^"]+)"', webpage, 'video URL') return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, 'http_headers': { 'Referer': iframe_url, }, }
miminus/youtube-dl
youtube_dl/extractor/videomega.py
Python
unlicense
1,920
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_array_almost_equal from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\ csgraph_to_dense, csgraph_from_dense def test_graph_breadth_first(): csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0], [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0, 1, 2, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 7, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) for directed in [True, False]: bfirst_test = breadth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst) def test_graph_depth_first(): csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0], [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) dfirst = np.array([[0, 1, 0, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 0, 0], [0, 0, 7, 0, 0], [0, 0, 0, 1, 0]]) for directed in [True, False]: dfirst_test = depth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(dfirst_test), dfirst) def test_graph_breadth_first_trivial_graph(): csgraph = np.array([[0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0]]) for directed in [True, False]: bfirst_test = breadth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst) def test_graph_depth_first_trivial_graph(): csgraph = np.array([[0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0]]) for directed in [True, False]: bfirst_test = depth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
jlcarmic/producthunt_simulator
venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py
Python
mit
2,390
from test_support import * prove_all ()
ptroja/spark2014
testsuite/gnatprove/tests/riposte__usergroup_examples/test.py
Python
gpl-3.0
41
# Copyright (c) 2001 Autonomous Zone Industries # Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn # This file is part of pyutil; see README.rst for licensing terms. """ An object that makes some of the attributes of your class persistent, pickling them and lazily writing them to a file. """ # from the Python Standard Library import os import cPickle as pickle import warnings # from the pyutil library import fileutil import nummedobj import twistedutil # from the Twisted library from twisted.python import log class PickleSaver(nummedobj.NummedObj): """ This makes some of the attributes of your class persistent, saving them in a pickle and saving them lazily. The general idea: You are going to tell PickleSaver which of your attributes ought to be persistently saved, and the name of a file to save them in. Those attributes will get saved to disk, and when your object is instantiated those attributes will get set to the values loaded from the file. Usage: inherit from PickleSaver and call PickleSaver.__init__() in your constructor. You will pass arguments to PickleSaver.__init__() telling it which attributes to save, which file to save them in, and what values they should have if there is no value stored for them in the file. Note: do *not* assign values to your persistent attributes in your constructor, because you might thus overwrite their persistent values. Then whenever you change one of the persistent attributes, call self.lazy_save() (it won't *really* save -- it'll just schedule a save for DELAY minutes later.) If you update an attribute and forget to call self.lazy_save() then the change will not be saved, unless you later call self.lazy_save() before you shut down. Data could be lost if the Python interpreter were to die unexpectedly (for example, due to a segfault in a compiled machine code module or due to the Python process being killed without warning via SIGKILL) before the delay passes. However if the Python interpreter shuts down cleanly (i.e., if it garbage collects and invokes the __del__ methods of the collected objects), then the data will be saved at that time (unless your class has the "not-collectable" problem: http://python.org/doc/current/lib/module-gc.html -- search in text for "uncollectable"). Note: you can pass DELAY=0 to make PickleSaver a not-so-lazy saver. The advantage of laziness is that you don't touch the disk as often -- touching disk is a performance cost. To cleanly shutdown, invoke shutdown(). Further operations after that will result in exceptions. """ class ExtRes: """ This is for holding things (external resources) that PickleSaver needs to finalize after PickleSaver is killed. (post-mortem finalization) In particular, this holds the names and values of all attributes that have been changed, so that after the PickleSaver is garbage-collected those values will be saved to the persistent file. """ def __init__(self, fname, objname): self.fname = fname self.objname = objname self.dirty = False # True iff the attrs have been changed and need to be saved to disk; When you change this flag from False to True, you schedule a save task for 10 minutes later. When the save task goes off it changes the flag from True to False. 
self.savertask = None self.valstr = None # the pickled (serialized, string) contents of the attributes that should be saved def _save_to_disk(self): if self.valstr is not None: log.msg("%s._save_to_disk(): fname: %s" % (self.objname, self.fname,)) of = open(self.fname + ".tmp", "wb") of.write(self.valstr) of.flush() of.close() of = None fileutil.remove_if_possible(self.fname) fileutil.rename(self.fname + ".tmp", self.fname) log.msg("%s._save_to_disk(): now, having finished write(), os.path.isfile(%s): %s" % (self, self.fname, os.path.isfile(self.fname),)) self.valstr = None self.dirty = False try: self.savertask.callId.cancel() except: pass self.savertask = None def shutdown(self): if self.dirty: self._save_to_disk() if self.savertask: try: self.savertask.callId.cancel() except: pass self.savertask = None def __del__(self): self.shutdown() def __init__(self, fname, attrs, DELAY=60*60, savecb=None): """ @param attrs: a dict whose keys are the names of all the attributes to be persistently stored and whose values are the initial default value that the attribute gets set to the first time it is ever used; After this first initialization, the value will be persistent so the initial default value will never be used again. @param savecb: if not None, then it is a callable that will be called after each save completes (useful for unit tests) (savecb doesn't get called after a shutdown-save, only after a scheduled save) """ warnings.warn("deprecated", DeprecationWarning) nummedobj.NummedObj.__init__(self) self._DELAY = DELAY self._attrnames = attrs.keys() self._extres = PickleSaver.ExtRes(fname=fname, objname=self.__repr__()) self._savecb = savecb for attrname, defaultval in attrs.items(): setattr(self, attrname, defaultval) try: attrdict = pickle.loads(open(self._extres.fname, "rb").read()) for attrname, attrval in attrdict.items(): if not hasattr(self, attrname): log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,)) setattr(self, attrname, attrval) except (pickle.UnpicklingError, IOError, EOFError,), le: try: attrdict = pickle.loads(open(self._extres.fname + ".tmp", "rb").read()) for attrname, attrval in attrdict.items(): if not hasattr(self, attrname): log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,)) setattr(self, attrname, attrval) except (pickle.UnpicklingError, IOError, EOFError,), le2: log.msg("Got exception attempting to load attrs. (This is normal if this is the first time you've used this persistent %s object.) 
fname: %s, le: %s, le2: %s" % (self.__class__, self._extres.fname, le, le2,)) self.lazy_save() def _store_attrs_in_extres(self): d = {} for attrname in self._attrnames: d[attrname] = getattr(self, attrname) # log.msg("%s._store_attrs_in_extres: attrname: %s, val: %s" % (self, attrname, getattr(self, attrname),)) # pickle the attrs now, to ensure that there are no reference cycles self._extres.valstr = pickle.dumps(d, True) # log.msg("%s._store_attrs_in_extres: valstr: %s" % (self, self._extres.valstr,)) self._extres.dirty = True def _save_to_disk(self): log.msg("%s._save_to_disk()" % (self,)) self._extres._save_to_disk() if self._savecb: self._savecb() def _lazy_save(self, delay=None): """ @deprecated: use lazy_save() instead """ return self.lazy_save(delay) def lazy_save(self, delay=None): """ @param delay: how long from now before the data gets saved to disk, or `None' in order to use the default value provided in the constructor """ if delay is None: delay=self._DELAY # copy the values into extres so that if `self' gets garbage-collected the values will be written to disk during post-mortem finalization. (This also marks it as dirty.) self._store_attrs_in_extres() newsavetask = twistedutil.callLater_weakly(delay, self._save_to_disk) if self._extres.savertask: if self._extres.savertask.callId.getTime() < newsavetask.callId.getTime(): try: newsavetask.callId.cancel() except: pass else: try: self._extres.savertask.callId.cancel() except: pass self._extres.savertask = newsavetask else: self._extres.savertask = newsavetask def shutdown(self): self.extres.shutdown() self.extres = None
heathseals/CouchPotatoServer
libs/pyutil/PickleSaver.py
Python
gpl-3.0
8,932
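The PickleSaver docstring above spells out the intended usage pattern; a minimal sketch of a subclass following it (hypothetical class and filename, Python 2 like the module itself, and it assumes a running Twisted reactor so the delayed save can fire):

from pyutil.PickleSaver import PickleSaver

class Counter(PickleSaver):
    def __init__(self):
        # Persistent attributes get their defaults via `attrs`, never by
        # direct assignment in the constructor.
        PickleSaver.__init__(self, fname="counter.pickle",
                             attrs={'count': 0}, DELAY=60)

    def increment(self):
        self.count += 1
        self.lazy_save()   # schedules the write; nothing touches disk yet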
# -*- coding: utf-8 -*- # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== from binascii import hexlify class HashAlgo: """A generic class for an abstract cryptographic hash algorithm. :undocumented: block_size """ #: The size of the resulting hash in bytes. digest_size = None #: The internal block size of the hash algorithm in bytes. block_size = None def __init__(self, hashFactory, data=None): """Initialize the hash object. :Parameters: hashFactory : callable An object that will generate the actual hash implementation. *hashFactory* must have a *new()* method, or must be directly callable. data : byte string The very first chunk of the message to hash. It is equivalent to an early call to `update()`. """ if hasattr(hashFactory, 'new'): self._hash = hashFactory.new() else: self._hash = hashFactory() if data: self.update(data) def update(self, data): """Continue hashing of a message by consuming the next chunk of data. Repeated calls are equivalent to a single call with the concatenation of all the arguments. In other words: >>> m.update(a); m.update(b) is equivalent to: >>> m.update(a+b) :Parameters: data : byte string The next chunk of the message being hashed. """ return self._hash.update(data) def digest(self): """Return the **binary** (non-printable) digest of the message that has been hashed so far. This method does not change the state of the hash object. You can continue updating the object after calling this function. :Return: A byte string of `digest_size` bytes. It may contain non-ASCII characters, including null bytes. """ return self._hash.digest() def hexdigest(self): """Return the **printable** digest of the message that has been hashed so far. This method does not change the state of the hash object. :Return: A string of 2* `digest_size` characters. It contains only hexadecimal ASCII digits. """ return self._hash.hexdigest() def copy(self): """Return a copy ("clone") of the hash object. The copy will have the same internal state as the original hash object. This can be used to efficiently compute the digests of strings that share a common initial substring. :Return: A hash object of the same type """ return self._hash.copy() def new(self, data=None): """Return a fresh instance of the hash object. Unlike the `copy` method, the internal state of the object is empty. :Parameters: data : byte string The next chunk of the message being hashed. :Return: A hash object of the same type """ pass
ktan2020/legacy-automation
win/Lib/site-packages/Crypto/Hash/hashalgo.py
Python
mit
3,984
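HashAlgo above is a thin wrapper around any object that is callable (or exposes a new() method) and yields a hash implementation; a small illustrative sketch driving it with Python's built-in hashlib.md5 as the hashFactory:

import hashlib
from Crypto.Hash.hashalgo import HashAlgo

h = HashAlgo(hashlib.md5, b"first chunk")
h.update(b" second chunk")          # same as hashing b"first chunk second chunk"
print(h.hexdigest())                # printable hex digest
clone = h.copy()                    # snapshot of the current internal state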
"""Newton-CG trust-region optimization.""" from __future__ import division, print_function, absolute_import import math import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options): """ Minimization of scalar function of one or more variables using the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for Newton-CG trust-region ' 'minimization') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is required for Newton-CG trust-region minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options) class CGSteihaugSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by a conjugate gradient method""" def solve(self, trust_radius): """ Solve the subproblem using a conjugate gradient method. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- This is algorithm (7.2) of Nocedal and Wright 2nd edition. Only the function that computes the Hessian-vector product is required. The Hessian itself is not required, and the Hessian does not need to be positive semidefinite. """ # get the norm of jacobian and define the origin p_origin = np.zeros_like(self.jac) # define a default tolerance tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag # Stop the method if the search direction # is a direction of nonpositive curvature. if self.jac_mag < tolerance: hits_boundary = False return p_origin, hits_boundary # init the state for the first iteration z = p_origin r = self.jac d = -r # Search for the min of the approximation of the objective function. while True: # do an iteration Bd = self.hessp(d) dBd = np.dot(d, Bd) if dBd <= 0: # Look at the two boundary points. # Find both values of t to get the boundary points such that # ||z + t d|| == trust_radius # and then choose the one with the predicted min value. ta, tb = self.get_boundaries_intersections(z, d, trust_radius) pa = z + ta * d pb = z + tb * d if self(pa) < self(pb): p_boundary = pa else: p_boundary = pb hits_boundary = True return p_boundary, hits_boundary r_squared = np.dot(r, r) alpha = r_squared / dBd z_next = z + alpha * d if scipy.linalg.norm(z_next) >= trust_radius: # Find t >= 0 to get the boundary point such that # ||z + t d|| == trust_radius ta, tb = self.get_boundaries_intersections(z, d, trust_radius) p_boundary = z + tb * d hits_boundary = True return p_boundary, hits_boundary r_next = r + alpha * Bd r_next_squared = np.dot(r_next, r_next) if math.sqrt(r_next_squared) < tolerance: hits_boundary = False return z_next, hits_boundary beta_next = r_next_squared / r_squared d_next = -r_next + beta_next * d # update the state for the next iteration z = z_next r = r_next d = d_next
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/optimize/_trustregion_ncg.py
Python
mit
4,646
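The CGSteihaugSubproblem above is what scipy.optimize.minimize runs when method='trust-ncg' is selected; a minimal usage sketch on the Rosenbrock function (illustrative values only):

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
res = minimize(rosen, x0, method='trust-ncg',
               jac=rosen_der, hessp=rosen_hess_prod,
               options={'gtol': 1e-8})
print(res.x)   # converges close to the all-ones minimizer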
# coding: utf-8 # (c) 2015, Toshio Kuratomi <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from nose import tools from ansible.compat.tests import unittest from ansible.parsing.splitter import unquote # Tests using nose's test generators cannot use unittest base class. # http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators class TestUnquote: UNQUOTE_DATA = ( (u'1', u'1'), (u'\'1\'', u'1'), (u'"1"', u'1'), (u'"1 \'2\'"', u'1 \'2\''), (u'\'1 "2"\'', u'1 "2"'), (u'\'1 \'2\'\'', u'1 \'2\''), (u'"1\\"', u'"1\\"'), (u'\'1\\\'', u'\'1\\\''), (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'), (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'), (u'"', u'"'), (u'\'', u'\''), # Not entirely sure these are good but they match the current # behaviour (u'"1""2"', u'1""2'), (u'\'1\'\'2\'', u'1\'\'2'), (u'"1" 2 "3"', u'1" 2 "3'), (u'"1"\'2\'"3"', u'1"\'2\'"3'), ) def check_unquote(self, quoted, expected): tools.eq_(unquote(quoted), expected) def test_unquote(self): for datapoint in self.UNQUOTE_DATA: yield self.check_unquote, datapoint[0], datapoint[1]
lberruti/ansible
test/units/parsing/test_unquote.py
Python
gpl-3.0
2,073
# -*- coding: utf-8 -*- """ werkzeug.contrib.wrappers ~~~~~~~~~~~~~~~~~~~~~~~~~ Extra wrappers or mixins contributed by the community. These wrappers can be mixed in into request objects to add extra functionality. Example:: from werkzeug.wrappers import Request as RequestBase from werkzeug.contrib.wrappers import JSONRequestMixin class Request(RequestBase, JSONRequestMixin): pass Afterwards this request object provides the extra functionality of the :class:`JSONRequestMixin`. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import codecs try: from simplejson import loads except ImportError: from json import loads from werkzeug.exceptions import BadRequest from werkzeug.utils import cached_property from werkzeug.http import dump_options_header, parse_options_header from werkzeug._compat import wsgi_decoding_dance def is_known_charset(charset): """Checks if the given charset is known to Python.""" try: codecs.lookup(charset) except LookupError: return False return True class JSONRequestMixin(object): """Add json method to a request object. This will parse the input data through simplejson if possible. :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type is not json or if the data itself cannot be parsed as json. """ @cached_property def json(self): """Get the result of simplejson.loads if possible.""" if 'json' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a JSON request') try: return loads(self.data) except Exception: raise BadRequest('Unable to read JSON request') class ProtobufRequestMixin(object): """Add protobuf parsing method to a request object. This will parse the input data through `protobuf`_ if possible. :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type is not protobuf or if the data itself cannot be parsed property. .. _protobuf: http://code.google.com/p/protobuf/ """ #: by default the :class:`ProtobufRequestMixin` will raise a #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not #: initialized. You can bypass that check by setting this #: attribute to `False`. protobuf_check_initialization = True def parse_protobuf(self, proto_type): """Parse the data into an instance of proto_type.""" if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a Protobuf request') obj = proto_type() try: obj.ParseFromString(self.data) except Exception: raise BadRequest("Unable to parse Protobuf request") # Fail if not all required fields are set if self.protobuf_check_initialization and not obj.IsInitialized(): raise BadRequest("Partial Protobuf request") return obj class RoutingArgsRequestMixin(object): """This request mixin adds support for the wsgiorg routing args `specification`_. .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args """ def _get_routing_args(self): return self.environ.get('wsgiorg.routing_args', (()))[0] def _set_routing_args(self, value): if self.shallow: raise RuntimeError('A shallow request tried to modify the WSGI ' 'environment. 
If you really want to do that, ' 'set `shallow` to False.') self.environ['wsgiorg.routing_args'] = (value, self.routing_vars) routing_args = property(_get_routing_args, _set_routing_args, doc=''' The positional URL arguments as `tuple`.''') del _get_routing_args, _set_routing_args def _get_routing_vars(self): rv = self.environ.get('wsgiorg.routing_args') if rv is not None: return rv[1] rv = {} if not self.shallow: self.routing_vars = rv return rv def _set_routing_vars(self, value): if self.shallow: raise RuntimeError('A shallow request tried to modify the WSGI ' 'environment. If you really want to do that, ' 'set `shallow` to False.') self.environ['wsgiorg.routing_args'] = (self.routing_args, value) routing_vars = property(_get_routing_vars, _set_routing_vars, doc=''' The keyword URL arguments as `dict`.''') del _get_routing_vars, _set_routing_vars class ReverseSlashBehaviorRequestMixin(object): """This mixin reverses the trailing slash behavior of :attr:`script_root` and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin` directly on the paths. Because it changes the behavior or :class:`Request` this class has to be mixed in *before* the actual request class:: class MyRequest(ReverseSlashBehaviorRequestMixin, Request): pass This example shows the differences (for an application mounted on `/application` and the request going to `/application/foo/bar`): +---------------+-------------------+---------------------+ | | normal behavior | reverse behavior | +===============+===================+=====================+ | `script_root` | ``/application`` | ``/application/`` | +---------------+-------------------+---------------------+ | `path` | ``/foo/bar`` | ``foo/bar`` | +---------------+-------------------+---------------------+ """ @cached_property def path(self): """Requested path as unicode. This works a bit like the regular path info in the WSGI environment but will not include a leading slash. """ path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '', self.charset, self.encoding_errors) return path.lstrip('/') @cached_property def script_root(self): """The root path of the script includling a trailing slash.""" path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '', self.charset, self.encoding_errors) return path.rstrip('/') + '/' class DynamicCharsetRequestMixin(object): """"If this mixin is mixed into a request class it will provide a dynamic `charset` attribute. This means that if the charset is transmitted in the content type headers it's used from there. Because it changes the behavior or :class:`Request` this class has to be mixed in *before* the actual request class:: class MyRequest(DynamicCharsetRequestMixin, Request): pass By default the request object assumes that the URL charset is the same as the data charset. If the charset varies on each request based on the transmitted data it's not a good idea to let the URLs change based on that. Most browsers assume either utf-8 or latin1 for the URLs if they have troubles figuring out. It's strongly recommended to set the URL charset to utf-8:: class MyRequest(DynamicCharsetRequestMixin, Request): url_charset = 'utf-8' .. versionadded:: 0.6 """ #: the default charset that is assumed if the content type header #: is missing or does not contain a charset parameter. The default #: is latin1 which is what HTTP specifies as default charset. #: You may however want to set this to utf-8 to better support #: browsers that do not transmit a charset for incoming data. 
default_charset = 'latin1' def unknown_charset(self, charset): """Called if a charset was provided but is not supported by the Python codecs module. By default latin1 is assumed then to not lose any information, you may override this method to change the behavior. :param charset: the charset that was not found. :return: the replacement charset. """ return 'latin1' @cached_property def charset(self): """The charset from the content type.""" header = self.environ.get('CONTENT_TYPE') if header: ct, options = parse_options_header(header) charset = options.get('charset') if charset: if is_known_charset(charset): return charset return self.unknown_charset(charset) return self.default_charset class DynamicCharsetResponseMixin(object): """If this mixin is mixed into a response class it will provide a dynamic `charset` attribute. This means that if the charset is looked up and stored in the `Content-Type` header and updates itself automatically. This also means a small performance hit but can be useful if you're working with different charsets on responses. Because the charset attribute is no a property at class-level, the default value is stored in `default_charset`. Because it changes the behavior or :class:`Response` this class has to be mixed in *before* the actual response class:: class MyResponse(DynamicCharsetResponseMixin, Response): pass .. versionadded:: 0.6 """ #: the default charset. default_charset = 'utf-8' def _get_charset(self): header = self.headers.get('content-type') if header: charset = parse_options_header(header)[1].get('charset') if charset: return charset return self.default_charset def _set_charset(self, charset): header = self.headers.get('content-type') ct, options = parse_options_header(header) if not ct: raise TypeError('Cannot set charset if Content-Type ' 'header is missing.') options['charset'] = charset self.headers['Content-Type'] = dump_options_header(ct, options) charset = property(_get_charset, _set_charset, doc=""" The charset for the response. It's stored inside the Content-Type header as a parameter.""") del _get_charset, _set_charset
DasIch/werkzeug
werkzeug/contrib/wrappers.py
Python
bsd-3-clause
10,337
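A short usage sketch for the JSONRequestMixin documented above. It is not part of the module; it assumes werkzeug's standard test helper werkzeug.test.EnvironBuilder is available and that the request body is valid JSON.

# Sketch: mix JSONRequestMixin into a request class and read request.json.
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin


class Request(RequestBase, JSONRequestMixin):
    pass


# Build a WSGI environ carrying a JSON body.
builder = EnvironBuilder(method='POST', data='{"answer": 42}',
                         content_type='application/json')
request = Request(builder.get_environ())
print(request.json)  # {'answer': 42}; BadRequest is raised for non-JSON bodies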
class Project(object):
    def __init__(self, name, start, end):
        self.name = name
        self.start = start
        self.end = end

    def __repr__(self):
        return "Project '%s' from %s to %s" % (
            self.name,
            self.start.isoformat(),
            self.end.isoformat()
        )
jskksj/cv2stuff
cv2stuff/gendata.py
Python
isc
282
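A tiny usage sketch for the Project helper above, assuming the class is defined in the same module; the datetime.date values are illustrative, and the class only requires start/end objects that implement isoformat().

# Sketch: construct a Project and show its repr.
from datetime import date

p = Project('cv2stuff', date(2015, 1, 1), date(2015, 6, 30))
print(repr(p))  # Project 'cv2stuff' from 2015-01-01 to 2015-06-30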
""" Performs management commands for the scheduler app """ import hashlib from flask.ext.script import Manager from sqlalchemy import exc # Importing routes causes our URL routes to be registered from src import routes from src import models from src import scheduler scheduler.app.config.from_object(scheduler.ConfigDevelopment) manager = Manager(scheduler.app) def add_coaches(): """ Adds two coaches (for testing) """ user_1 = models.User( id='ecf1bcae-9c8f-11e5-b5b4-d895b95699bb', fullname='Pat Blargstone', username='pat', password=hashlib.md5('secret').hexdigest()) coach_1 = models.Coach( id='ee8d1d30-9c8f-11e5-89d4-d895b95699bb', user_id=user_1.id) user_2 = models.User( id='ef2a95b0-9c8f-11e5-bd27-d895b95699bb', fullname='Sandy Blargwright', username='sandy', password=hashlib.md5('secret').hexdigest()) coach_2 = models.Coach( id='efad3330-9c8f-11e5-9654-d895b95699bb', user_id=user_2.id) try: scheduler.db.session.add(user_1) scheduler.db.session.add(user_2) scheduler.db.session.add(coach_1) scheduler.db.session.add(coach_2) scheduler.db.session.commit() except exc.SQLAlchemyError: pass if __name__ == '__main__': scheduler.db.create_all() add_coaches() manager.run()
ginstrom/scheduler
manage.py
Python
mit
1,385
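The Manager instance above drives the command line via Flask-Script. As a hedged sketch (the command name and the query are illustrative, not part of the original file, and assume models.Coach is a queryable Flask-SQLAlchemy model), an extra command can be registered with the @manager.command decorator and invoked as `python manage.py coaches`.

# Sketch: register an additional Flask-Script command on the manager above.
@manager.command
def coaches():
    """Print the id of every coach in the database."""
    for coach in models.Coach.query.all():
        print(coach.id)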
#!/usr/bin/python3 """ Script to check recently uploaded files. This script checks if a file description is present and if there are other problems in the image's description. This script will have to be configured for each language. Please submit translations as addition to the Pywikibot framework. Everything that needs customisation is indicated by comments. This script understands the following command-line arguments: -limit The number of images to check (default: 80) -commons The bot will check if an image on Commons has the same name and if true it reports the image. -duplicates[:#] Checking if the image has duplicates (if arg, set how many rollback wait before reporting the image in the report instead of tag the image) default: 1 rollback. -duplicatesreport Report the duplicates in a log *AND* put the template in the images. -maxusernotify Maximum notifications added to a user talk page in a single check, to avoid email spamming. -sendemail Send an email after tagging. -break To break the bot after the first check (default: recursive) -sleep[:#] Time in seconds between repeat runs (default: 30) -wait[:#] Wait x second before check the images (default: 0) -skip[:#] The bot skip the first [:#] images (default: 0) -start[:#] Use allimages() as generator (it starts already from File:[:#]) -cat[:#] Use a category as generator -regex[:#] Use regex, must be used with -url or -page -page[:#] Define the name of the wikipage where are the images -url[:#] Define the url where are the images -nologerror If given, this option will disable the error that is risen when the log is full. Instructions for the real-time settings. For every new block you have to add: <------- -------> In this way the bot can understand where the block starts in order to take the right parameter. * Name= Set the name of the block * Find= search this text in the image's description * Findonly= search for exactly this text in the image's description * Summary= That's the summary that the bot will use when it will notify the problem. * Head= That's the incipit that the bot will use for the message. * Text= This is the template that the bot will use when it will report the image's problem. Todo ---- * Clean the code, some passages are pretty difficult to understand. * Add the "catch the language" function for commons. * Fix and reorganise the new documentation * Add a report for the image tagged. """ # # (C) Pywikibot team, 2006-2022 # # Distributed under the terms of the MIT license. # import collections import re import time from typing import Generator import pywikibot from pywikibot import config, i18n from pywikibot import pagegenerators as pg from pywikibot.backports import List, Tuple from pywikibot.bot import suggest_help from pywikibot.exceptions import ( EditConflictError, Error, IsRedirectPageError, LockedPageError, NoPageError, NotEmailableError, PageRelatedError, PageSaveRelatedError, ServerError, TranslationError, ) from pywikibot.family import Family from pywikibot.site import Namespace ############################################################################### # <--------------------------- Change only below! ---------------------------># ############################################################################### # NOTE: in the messages used by the bot if you put __botnick__ in the text, it # will automatically replaced with the bot's nickname. # That's what you want that will be added. (i.e. 
the {{no source}} with the # right day/month/year ) N_TXT = { 'commons': '{{subst:nld}}', 'meta': '{{No license}}', 'test': '{{No license}}', 'ar': '{{subst:ملم}}', 'arz': '{{subst:ملم}}', 'de': '{{Dateiüberprüfung}}', 'en': '{{subst:nld}}', 'fa': '{{subst:حق تکثیر تصویر نامعلوم}}', 'fr': '{{subst:lid}}', 'ga': '{{subst:Ceadúnas de dhíth}}', 'hr': '{{Bez licence}}', 'hu': '{{nincslicenc|~~~~~}}', 'it': '{{subst:unverdata}}', 'ja': '{{subst:Nld}}', 'ko': '{{subst:nld}}', 'ru': '{{subst:nld}}', 'sd': '{{subst:اجازت نامعلوم}}', 'sr': '{{subst:датотека без лиценце}}', 'ta': '{{subst:nld}}', 'ur': '{{subst:حقوق نسخہ تصویر نامعلوم}}', 'zh': '{{subst:No license/auto}}', } # Text that the bot will try to see if there's already or not. If there's a # {{ I'll use a regex to make a better check. # This will work so: # '{{no license' --> '\{\{(?:template:)?no[ _]license ?(?:\||\n|\}|/) ?' (case # insensitive). # If there's not a {{ it will work as usual (if x in Text) TXT_FIND = { 'commons': ['{{no license', '{{no license/en', '{{nld', '{{no permission', '{{no permission since'], 'meta': ['{{no license', '{{nolicense', '{{nld'], 'test': ['{{no license'], 'ar': ['{{لت', '{{لا ترخيص'], 'arz': ['{{nld', '{{no license'], 'de': ['{{DÜP', '{{Düp', '{{Dateiüberprüfung'], 'en': ['{{nld', '{{no license'], 'fa': ['{{حق تکثیر تصویر نامعلوم۲'], 'ga': ['{{Ceadúnas de dhíth', '{{Ceadúnas de dhíth'], 'hr': ['{{bez licence'], 'hu': ['{{nincsforrás', '{{nincslicenc'], 'it': ['{{unverdata', '{{unverified'], 'ja': ['{{no source', '{{unknown', '{{non free', '<!--削除についての議論が終了するまで'], 'ko': ['{{출처 없음', '{{라이선스 없음', '{{Unknown'], 'ru': ['{{no license'], 'sd': ['{{ناحوالا', '{{ااجازت نامعلوم', '{{Di-no'], 'sr': ['{{датотека без лиценце', '{{датотека без извора'], 'ta': ['{{no source', '{{nld', '{{no license'], 'ur': ['{{ناحوالہ', '{{اجازہ نامعلوم', '{{Di-no'], 'zh': ['{{no source', '{{unknown', '{{No license'], } # When the bot find that the usertalk is empty is not pretty to put only the # no source without the welcome, isn't it? EMPTY = { 'commons': '{{subst:welcome}}\n~~~~\n', 'meta': '{{subst:Welcome}}\n~~~~\n', 'ar': '{{subst:أهلا ومرحبا}}\n~~~~\n', 'arz': '{{subst:اهلا و سهلا}}\n~~~~\n', 'de': '{{subst:willkommen}} ~~~~', 'en': '{{subst:welcome}}\n~~~~\n', 'fa': '{{subst:خوشامدید|%s}}', 'fr': '{{Bienvenue nouveau\n~~~~\n', 'ga': '{{subst:Fáilte}} - ~~~~\n', 'hr': '{{subst:dd}}--~~~~\n', 'hu': '{{subst:Üdvözlet|~~~~}}\n', 'it': '<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n~~~~\n' '<!-- fine template di benvenuto -->', 'ja': '{{subst:Welcome/intro}}\n{{subst:welcome|--~~~~}}\n', 'ko': '{{환영}}--~~~~\n', 'ru': '{{subst:Приветствие}}\n~~~~\n', 'sd': '{{ڀليڪار}}\n~~~~\n', 'sr': '{{dd}}--~~~~\n', 'ta': '{{welcome}}\n~~~~\n', 'ur': '{{خوش آمدید}}\n~~~~\n', 'zh': '{{subst:welcome|sign=~~~~}}', } # if the file has an unknown extension it will be tagged with this template. # In reality, there aren't unknown extension, they are only not allowed... DELETE_IMMEDIATELY = { 'commons': '{{speedy|The file has .%s as extension. ' 'Is it ok? 
Please check.}}', 'meta': '{{Delete|The file has .%s as extension.}}', 'ar': '{{شطب|الملف له .%s كامتداد.}}', 'arz': '{{مسح|الملف له .%s كامتداد.}}', 'en': '{{db-meta|The file has .%s as extension.}}', 'fa': '{{حذف سریع|تصویر %s اضافی است.}}', 'ga': '{{scrios|Tá iarmhír .%s ar an comhad seo.}}', 'hu': '{{azonnali|A fájlnak .%s a kiterjesztése}}', 'it': '{{cancella subito|motivo=Il file ha come estensione ".%s"}}', 'ja': '{{db|知らないファイルフォーマット %s}}', 'ko': '{{delete|잘못된 파일 형식 (.%s)}}', 'ru': '{{db-badimage}}', 'sr': '{{speedy|Ова датотека садржи екстензију %s. ' 'Молим вас да проверите да ли је у складу са правилима.}}', 'ta': '{{delete|' 'இந்தக் கோப்பு .%s என்றக் கோப்பு நீட்சியைக் கொண்டுள்ளது.}}', 'ur': '{{سریع حذف شدگی|اس ملف میں .%s بطور توسیع موجود ہے۔ }}', 'zh': '{{delete|未知檔案格式%s}}', } # That's the text that the bot will add if it doesn't find the license. # Note: every __botnick__ will be repleaced with your bot's nickname # (feel free not to use if you don't need it) NOTHING_NOTIFICATION = { 'commons': "\n{{subst:User:Filnik/untagged|File:%s}}\n\n''This message " "was '''added automatically by ~~~''', if you need " 'some help about it, please read the text above again and ' 'follow the links in it, if you still need help ask at the ' '[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]] ' "'''[[Commons:Help desk|->]][[Commons:Help desk]]''' in any " "language you like to use.'' --~~~~", 'meta': '{{subst:No license notice|File:%s}}', 'ar': '{{subst:مصدر الملف|File:%s}} --~~~~', 'arz': '{{subst:file source|File:%s}} --~~~~', 'en': '{{subst:file source|File:%s}} --~~~~', 'fa': '{{subst:اخطار نگاره|%s}}', 'ga': '{{subst:Foinse na híomhá|File:%s}} --~~~~', 'hu': '{{subst:adjforrást|Kép:%s}}\n Ezt az üzenetet ~~~ automatikusan ' 'helyezte el a vitalapodon, kérdéseddel fordulj a gazdájához, vagy ' 'a [[WP:KF|Kocsmafalhoz]]. --~~~~', 'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza licenza|' '%s|~~~}} --~~~~', 'ja': '\n{{subst:Image copyright|File:%s}}--~~~~', 'ko': '\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~', 'ru': '{{subst:Запрос о статусе файла|Файл:%s}} --~~~~', 'sr': '\n{{subst:Обавештење о датотеци без лиценце|%s}} --~~~~', 'sd': '{{subst:تصوير جو ذريعو|File:%s}}--~~~~', 'ta': '\n{{subst:Di-no license-notice|படிமம்:%s}} ~~~~', 'ur': '{{subst:ماخذ تصویر|File:%s}}--~~~~', 'zh': '\n{{subst:Uploadvionotice|File:%s}} ~~~~', } # This is a list of what bots used this script in your project. # NOTE: YOUR bot username will be automatically added. BOT_LIST = { 'commons': ['Siebot', 'CommonsDelinker', 'Filbot', 'Sz-iwbot', 'ABFbot'], 'meta': ['MABot'], 'ar': ['MenoBot'], 'arz': ['MenoBot'], 'de': ['Xqbot'], 'en': ['OrphanBot'], 'fa': ['Amirobot'], 'ga': ['AllieBot'], 'it': ['Filbot', 'Nikbot', '.snoopybot.'], 'ja': ['Alexbot'], 'ko': ['Kwjbot IV'], 'ru': ['Rubinbot'], 'sr': ['KizuleBot'], 'ta': ['TrengarasuBOT'], 'ur': ['Shuaib-bot', 'Tahir-bot', 'SAMI.Bot'], 'zh': ['Alexbot'], } # The message that the bot will add the second time that find another license # problem. SECOND_MESSAGE_WITHOUT_LICENSE = { 'hu': '\nSzia! Úgy tűnik a [[:Kép:%s]] képpel is hasonló a probléma, ' 'mint az előbbivel. Kérlek olvasd el a [[WP:KÉPLIC|feltölthető ' 'képek]]ről szóló oldalunk, és segítségért fordulj a [[WP:KF-JO|' 'Jogi kocsmafalhoz]]. Köszönöm --~~~~', 'it': ':{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza' 'licenza2|%s|~~~}} --~~~~', } # You can add some settings to a wiki page. In this way, you can change them # without touching the code. 
That's useful if you are running the bot on # Toolserver. PAGE_WITH_SETTINGS = { 'commons': 'User:Filbot/Settings', 'it': 'Progetto:Coordinamento/Immagini/Bot/Settings#Settings', 'sr': 'User:KizuleBot/checkimages.py/подешавања', 'zh': 'User:Alexbot/cisettings#Settings', } # The bot can report some images (like the images that have the same name of an # image on commons) This is the page where the bot will store them. REPORT_PAGE = { 'commons': 'User:Filbot/Report', 'meta': 'User:MABot/Report', 'test': 'User:Pywikibot-test/Report', 'ar': 'User:MenoBot/Report', 'arz': 'User:MenoBot/Report', 'de': 'Benutzer:Xqbot/Report', 'en': 'User:Filnik/Report', 'fa': 'کاربر:Amirobot/گزارش تصویر', 'ga': 'User:AllieBot/ReportImages', 'hu': 'User:Bdamokos/Report', 'it': 'Progetto:Coordinamento/Immagini/Bot/Report', 'ja': 'User:Alexbot/report', 'ko': 'User:Kwjbot IV/Report', 'ru': 'User:Rubinbot/Report', 'sd': 'واپرائيندڙ:Kaleem Bhatti/درخواست تصوير', 'sr': 'User:KizuleBot/checkimages.py/дневник', 'ta': 'User:Trengarasu/commonsimages', 'ur': 'صارف:محمد شعیب/درخواست تصویر', 'zh': 'User:Alexsh/checkimagereport', } # If a template isn't a license but it's included on a lot of images, that can # be skipped to analyze the image without taking care of it. (the template must # be in a list) # Warning: Don't add template like "en, de, it" because they are already in # (added in the code, below # Warning 2: The bot will use regex, make the names compatible, please (don't # add "Template:" or {{because they are already put in the regex). # Warning 3: the part that use this regex is case-insensitive (just to let you # know..) HIDDEN_TEMPLATE = { # Put the other in the page on the project defined below 'commons': ['Template:Information'], 'meta': ['Template:Information'], 'test': ['Template:Information'], 'ar': ['Template:معلومات'], 'arz': ['Template:معلومات'], 'de': ['Template:Information'], 'en': ['Template:Information'], 'fa': ['الگو:اطلاعات'], 'fr': ['Template:Information'], 'ga': ['Template:Information'], 'hr': ['Template:Infoslika'], 'hu': ['Template:Információ', 'Template:Enwiki', 'Template:Azonnali'], 'it': ['Template:EDP', 'Template:Informazioni file', 'Template:Information', 'Template:Trademark', 'Template:Permissionotrs'], 'ja': ['Template:Information'], 'ko': ['Template:그림 정보'], 'ru': ['Template:Изображение', 'Template:Обоснование добросовестного использования'], 'sd': ['Template:معلومات'], 'sr': ['Шаблон:Информација', 'Шаблон:Non-free use rationale 2'], 'ta': ['Template:Information'], 'ur': ['Template:معلومات'], 'zh': ['Template:Information'], } # A page where there's a list of template to skip. PAGE_WITH_HIDDEN_TEMPLATES = { 'commons': 'User:Filbot/White_templates#White_templates', 'it': 'Progetto:Coordinamento/Immagini/Bot/WhiteTemplates', 'ko': 'User:Kwjbot_IV/whitetemplates/list', 'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони', } # A page where there's a list of template to consider as licenses. PAGE_WITH_ALOWED_TEMPLATES = { 'commons': 'User:Filbot/Allowed templates', 'de': 'Benutzer:Xqbot/Lizenzvorlagen', 'it': 'Progetto:Coordinamento/Immagini/Bot/AllowedTemplates', 'ko': 'User:Kwjbot_IV/AllowedTemplates', 'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони', } # Template added when the bot finds only an hidden template and nothing else. 
# Note: every __botnick__ will be repleaced with your bot's nickname # (feel free not to use if you don't need it) HIDDEN_TEMPALTE_NOTIFICATION = { 'commons': ("\n{{subst:User:Filnik/whitetemplate|File:%s}}\n\n''This " 'message was added automatically by ~~~, if you need ' 'some help about it please read the text above again and ' 'follow the links in it, if you still need help ask at the ' '[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]]' " '''[[Commons:Help desk|→]] [[Commons:Help desk]]''' in any " "language you like to use.'' --~~~~"), 'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/' 'Template_insufficiente|%s|~~~}} --~~~~', 'ko': '\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~', } # In this part there are the parameters for the dupe images. # Put here the template that you want to put in the image to warn that it's a # dupe. put __image__ if you want only one image, __images__ if you want the # whole list DUPLICATES_TEXT = { 'commons': '\n{{Dupe|__image__}}', 'de': '{{NowCommons}}', 'it': '\n{{Progetto:Coordinamento/Immagini/Bot/Template duplicati|' '__images__}}', 'ru': '{{NCT|__image__}}', 'sr': '{{NowCommons|__image__}}', } # Message to put in the talk DUPLICATES_USER_TALK_TEXT = { 'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Duplicati|' '%s|%s|~~~}} --~~~~', } # Regex to detect the template put in the image's description to find the dupe DUPLICATES_REGEX = { 'commons': r'\{\{(?:[Tt]emplate:|)(?:[Dd]up(?:licat|)e|[Bb]ad[ _][Nn]ame)' r'[|}]', 'de': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}', 'it': r'\{\{(?:[Tt]emplate:|)[Pp]rogetto:[Cc]oordinamento/Immagini/Bot/' r'Template duplicati[|}]', 'sr': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}', } # Category with the licenses and / or with subcategories with the other # licenses. CATEGORY_WITH_LICENSES = { 'commons': 'Category:License tags', 'meta': 'Category:License templates', 'test': 'Category:CC license tags', 'ar': 'تصنيف:قوالب حقوق الصور', 'arz': 'تصنيف:Wikipedia image copyright templates', 'de': 'Kategorie:Vorlage:Lizenz für Bilder', 'en': 'Category:Wikipedia file copyright templates', 'fa': 'رده:الگو:حق تکثیر پرونده', 'ga': "Catagóir:Clibeanna cóipchirt d'íomhánna", 'it': 'Categoria:Template Licenze copyright', 'ja': 'Category:画像の著作権表示テンプレート', 'ko': '분류:위키백과 그림 저작권 틀', 'ru': 'Category:Шаблоны:Лицензии файлов', 'sd': 'زمرو:وڪيپيڊيا فائل ڪاپي رائيٽ سانچا', 'sr': 'Категорија:Шаблони за слике', 'ta': 'Category:காப்புரிமை வார்ப்புருக்கள்', 'ur': 'زمرہ:ویکیپیڈیا سانچہ جات حقوق تصاویر', 'zh': 'Category:版權申告模板', } # Page where is stored the message to send as email to the users EMAIL_PAGE_WITH_TEXT = { # 'de': 'Benutzer:ABF/D3', } # Title of the email EMAIL_SUBJECT = { # 'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia', } # Seems that uploader bots aren't interested to get messages regarding the # files that they upload.. strange, uh? # Format: [[user,regex], [user,regex]...] 
the regex is needed to match the user # where to send the warning-msg UPLOAD_BOTS = { 'commons': [['File Upload Bot (Magnus Manske)', r'\|[Ss]ource=Transferred from .*?; ' r'transferred to Commons by \[\[User:(.*?)\]\]']], } # Service images that don't have to be deleted and/or reported has a template # inside them (you can let this param as None) SERVICE_TEMPLATES = { 'it': ['Template:Immagine di servizio'], } # Add your project (in alphabetical order) if you want that the bot starts PROJECT_INSERTED = ['ar', 'arz', 'commons', 'de', 'en', 'fa', 'ga', 'hu', 'it', 'ja', 'ko', 'ru', 'meta', 'sd', 'sr', 'ta', 'test', 'ur', 'zh'] # END OF CONFIGURATION. SETTINGS_REGEX = re.compile(r""" <-------\ ------->\n \*[Nn]ame\ ?=\ ?['"](.*?)['"]\n \*([Ff]ind|[Ff]indonly)\ ?=\ ?(.*?)\n \*[Ii]magechanges\ ?=\ ?(.*?)\n \*[Ss]ummary\ ?=\ ?['"](.*?)['"]\n \*[Hh]ead\ ?=\ ?['"](.*?)['"]\n \*[Tt]ext\ ?=\ ?['"](.*?)['"]\n \*[Mm]ex\ ?=\ ?['"]?([^\n]*?)['"]?\n """, re.DOTALL | re.VERBOSE) class LogIsFull(Error): """Log is full and the bot cannot add other data to prevent Errors.""" def print_with_time_zone(message) -> None: """Print the messages followed by the TimeZone encoded correctly.""" time_zone = time.strftime('%d %b %Y %H:%M:%S (UTC)', time.gmtime()) pywikibot.output('{} {}'.format(message.rstrip(), time_zone)) class CheckImagesBot: """A robot to check recently uploaded files.""" ignore_save_related_errors = True ignore_server_errors = False def __init__( self, site, log_full_number: int = 25000, sendemail_active: bool = False, duplicates_report: bool = False, log_full_error: bool = True, max_user_notify=None ) -> None: """Initializer, define some instance variables.""" self.site = site self.log_full_error = log_full_error self.log_full_number = log_full_number self.rep_page = i18n.translate(self.site, REPORT_PAGE) if not self.rep_page: raise TranslationError( 'No report page provided in "REPORT_PAGE" dict ' 'for your project!') self.image_namespace = site.namespaces.FILE.custom_name + ':' self.list_entry = '\n* [[:{}%s]] '.format(self.image_namespace) # The summary of the report self.com = i18n.twtranslate(self.site, 'checkimages-log-comment') hiddentemplates_raw = i18n.translate(self.site, HIDDEN_TEMPLATE) if not hiddentemplates_raw: raise TranslationError( 'No non-license templates provided in "HIDDEN_TEMPLATE" dict ' 'for your project!') self.hiddentemplates = { pywikibot.Page(self.site, tmp, ns=self.site.namespaces.TEMPLATE) for tmp in hiddentemplates_raw} self.page_hidden = i18n.translate(self.site, PAGE_WITH_HIDDEN_TEMPLATES) self.page_allowed = i18n.translate(self.site, PAGE_WITH_ALOWED_TEMPLATES) self.comment = i18n.twtranslate(self.site.lang, 'checkimages-source-tag-comment') # Adding the bot's nickname at the notification text if needed. 
self.bots = i18n.translate(self.site, BOT_LIST) if self.bots: self.bots.append(site.username()) else: self.bots = [site.username()] self.sendemail_active = sendemail_active self.skip_list = [] self.duplicates_report = duplicates_report if max_user_notify: self.num_notify = collections.defaultdict(lambda: max_user_notify) else: self.num_notify = None # Load the licenses only once, so do it once self.list_licenses = self.load_licenses() def set_parameters(self, image) -> None: """Set parameters.""" # ensure we have a FilePage self.image = pywikibot.FilePage(image) self.image_name = image.title(with_ns=False) self.timestamp = None self.uploader = None def report( self, newtext, image_to_report, notification=None, head=None, notification2=None, unver: bool = True, comm_talk=None, comm_image=None ) -> None: """Function to make the reports easier.""" self.image_to_report = image_to_report self.newtext = newtext if not newtext: raise TranslationError( 'No no-license template provided in "N_TXT" dict ' 'for your project!') self.head = head or '' self.notification = notification self.notification2 = notification2 if self.notification: self.notification = re.sub(r'__botnick__', self.site.username(), notification) if self.notification2: self.notification2 = re.sub(r'__botnick__', self.site.username(), notification2) self.comm_talk = comm_talk self.comm_image = comm_image or self.comment image_tagged = False try: image_tagged = self.tag_image(unver) except NoPageError: pywikibot.output('The page has been deleted! Skip!') except EditConflictError: pywikibot.output('Edit conflict! Skip!') if image_tagged and self.notification: try: self.put_mex_in_talk() except EditConflictError: pywikibot.output('Edit Conflict! Retrying...') try: self.put_mex_in_talk() except Exception: pywikibot.exception() pywikibot.output( 'Another error... skipping the user...') def upload_bot_change_function( self, report_page_text, upload_bot_array ) -> str: """Detect the user that has uploaded the file through upload bot.""" regex = upload_bot_array[1] results = re.findall(regex, report_page_text) if results: luser = results[0] return luser # we can't find the user, report the problem to the bot return upload_bot_array[0] def tag_image(self, put: bool = True) -> bool: """Add template to the Image page and find out the uploader.""" # Get the image's description report_page_object = pywikibot.FilePage(self.site, self.image_to_report) try: report_page_text = report_page_object.get() except NoPageError: pywikibot.output(self.image_name + ' has been deleted...') return False # You can use this function also to find only the user that # has upload the image (FixME: Rewrite a bit this part) if put: pywikibot.showDiff(report_page_text, self.newtext + '\n' + report_page_text) pywikibot.output(self.comm_image) try: report_page_object.put(self.newtext + '\n' + report_page_text, summary=self.comm_image) except LockedPageError: pywikibot.output('File is locked. Skipping.') return False # paginetta it's the image page object. try: if report_page_object == self.image and self.uploader: nick = self.uploader else: nick = report_page_object.latest_file_info.user except PageRelatedError: pywikibot.output( 'Seems that {} has only the description and not the file...' 
.format(self.image_to_report)) repme = self.list_entry + "problems '''with the APIs'''" self.report_image(self.image_to_report, self.rep_page, self.com, repme) return False upload_bots = i18n.translate(self.site, UPLOAD_BOTS) user = pywikibot.User(self.site, nick) luser = user.title(as_url=True) if upload_bots: for upload_bot in upload_bots: if upload_bot[0] == luser: luser = self.upload_bot_change_function(report_page_text, upload_bot) user = pywikibot.User(self.site, luser) self.talk_page = user.getUserTalkPage() self.luser = luser return True def put_mex_in_talk(self) -> None: """Function to put the warning in talk page of the uploader.""" commento2 = i18n.twtranslate(self.site.lang, 'checkimages-source-notice-comment') email_page_name = i18n.translate(self.site, EMAIL_PAGE_WITH_TEXT) email_subj = i18n.translate(self.site, EMAIL_SUBJECT) if self.notification2: self.notification2 %= self.image_to_report else: self.notification2 = self.notification second_text = False # Getting the talk page's history, to check if there is another # advise... try: testoattuale = self.talk_page.get() history = list(self.talk_page.revisions(total=10)) latest_user = history[0]['user'] pywikibot.output( 'The latest user that has written something is: ' + latest_user) # A block to prevent the second message if the bot also # welcomed users... if latest_user in self.bots and len(history) > 1: second_text = True except IsRedirectPageError: pywikibot.output( 'The user talk is a redirect, trying to get the right talk...') try: self.talk_page = self.talk_page.getRedirectTarget() testoattuale = self.talk_page.get() except NoPageError: testoattuale = i18n.translate(self.site, EMPTY) except NoPageError: pywikibot.output('The user page is blank') testoattuale = i18n.translate(self.site, EMPTY) if self.comm_talk: commentox = self.comm_talk else: commentox = commento2 if second_text: new_text = '{}\n\n{}'.format(testoattuale, self.notification2) else: new_text = '{}\n\n== {} ==\n{}'.format(testoattuale, self.head, self.notification) # Check maximum number of notifications for this talk page if (self.num_notify is not None and self.num_notify[self.talk_page.title()] == 0): pywikibot.output('Maximum notifications reached, skip.') return try: self.talk_page.put(new_text, summary=commentox, minor=False) except PageSaveRelatedError as e: if not self.ignore_save_related_errors: raise err = e except ServerError as e: if not self.ignore_server_errors: raise err = e else: if self.num_notify is not None: self.num_notify[self.talk_page.title()] -= 1 err = None if err: pywikibot.exception(err) pywikibot.output('Skipping saving talk page {}' .format(self.talk_page)) if email_page_name and email_subj: email_page = pywikibot.Page(self.site, email_page_name) try: email_text = email_page.get() except (NoPageError, IsRedirectPageError): return if self.sendemail_active: text_to_send = re.sub(r'__user-nickname__', r'{}' .format(self.luser), email_text) email_class = pywikibot.User(self.site, self.luser) try: email_class.send_email(email_subj, text_to_send) except NotEmailableError: pywikibot.output('User is not mailable, aborted') def regex_generator(self, regexp, textrun) -> Generator[pywikibot.FilePage, None, None]: """Find page to yield using regex to parse text.""" regex = re.compile(r'{}'.format(regexp), re.DOTALL) results = regex.findall(textrun) for image in results: yield pywikibot.FilePage(self.site, image) def load_hidden_templates(self) -> None: """Function to load the white templates.""" # A template as {{en is not a license! 
Adding also them in the # whitelist template... for key in Family.load('wikipedia').langs.keys(): self.hiddentemplates.add(pywikibot.Page( self.site, 'Template:{}'.format(key))) # Hidden template loading if self.page_hidden: try: page_hidden_text = pywikibot.Page(self.site, self.page_hidden).get() except (NoPageError, IsRedirectPageError): page_hidden_text = '' for element in self.load(page_hidden_text): self.hiddentemplates.add(pywikibot.Page(self.site, element)) def important_image(self, list_given) -> pywikibot.FilePage: """ Get tuples of image and time, return the most used or oldest image. :param list_given: a list of tuples which hold seconds and FilePage :type list_given: list :return: the most used or oldest image """ # find the most used image inx_found = None # index of found image max_usage = 0 # hold max amount of using pages for num, element in enumerate(list_given): image = element[1] image_used = len(list(image.usingPages())) if image_used > max_usage: max_usage = image_used inx_found = num if inx_found is not None: return list_given[inx_found][1] # find the oldest image sec, image = max(list_given, key=lambda element: element[0]) return image def check_image_on_commons(self) -> bool: """Checking if the file is on commons.""" pywikibot.output('Checking if [[{}]] is on commons...' .format(self.image_name)) try: hash_found = self.image.latest_file_info.sha1 except NoPageError: return False # Image deleted, no hash found. Skip the image. site = pywikibot.Site('commons', 'commons') commons_image_with_this_hash = next( iter(site.allimages(sha1=hash_found, total=1)), None) if commons_image_with_this_hash: service_template = pywikibot.translate(self.site, SERVICE_TEMPLATES) templates_in_the_image = self.image.templates() if service_template is not None: for template in service_template: if pywikibot.Page(self.site, template) in templates_in_the_image: pywikibot.output( "{} is on commons but it's a service image." .format(self.image_name)) return True # continue with the check-part pywikibot.output(self.image_name + ' is on commons!') if self.image.file_is_shared(): pywikibot.output( "But, the file doesn't exist on your project! Skip...") # We have to skip the check part for that image because # it's on commons but someone has added something on your # project. return False if re.findall(r'\bstemma\b', self.image_name.lower()) and \ self.site.code == 'it': pywikibot.output( "{} has 'stemma' inside, means that it's ok." .format(self.image_name)) return True # It's not only on commons but the image needs a check # the second usually is a url or something like that. # Compare the two in equal way, both url. 
repme = ((self.list_entry + "is also on '''Commons''': [[commons:File:%s]]") % (self.image_name, commons_image_with_this_hash.title( with_ns=False))) if (self.image.title(as_url=True) == commons_image_with_this_hash.title(as_url=True)): repme += ' (same name)' self.report_image(self.image_name, self.rep_page, self.com, repme, addings=False) return True def check_image_duplicated(self, duplicates_rollback) -> bool: """Function to check the duplicated files.""" dup_text = i18n.translate(self.site, DUPLICATES_TEXT) dup_regex = i18n.translate(self.site, DUPLICATES_REGEX) dup_talk_text = i18n.translate(self.site, DUPLICATES_USER_TALK_TEXT) # Head of the message given to the author dup_talk_head = i18n.twtranslate( self.site, 'checkimages-doubles-head') # Comment while bot reports the problem in the uploader's talk dup_comment_talk = i18n.twtranslate( self.site, 'checkimages-doubles-talk-comment') # Comment used by the bot while it reports the problem in the image dup_comment_image = i18n.twtranslate( self.site, 'checkimages-doubles-file-comment') image_page = pywikibot.FilePage(self.site, self.image_name) hash_found = image_page.latest_file_info.sha1 duplicates = list(self.site.allimages(sha1=hash_found)) if not duplicates: return False # Image deleted, no hash found. Skip the image. if len(duplicates) > 1: xdict = {'en': '%(name)s has {{PLURAL:count' '|a duplicate! Reporting it' '|%(count)s duplicates! Reporting them}}...'} pywikibot.output(i18n.translate('en', xdict, {'name': self.image_name, 'count': len(duplicates) - 1})) if dup_text and dup_regex: time_image_list = [] for dup_page in duplicates: if (dup_page.title(as_url=True) != self.image.title( as_url=True) or self.timestamp is None): try: self.timestamp = ( dup_page.latest_file_info.timestamp) except PageRelatedError: continue data = self.timestamp.timetuple() data_seconds = time.mktime(data) time_image_list.append([data_seconds, dup_page]) older_image_page = self.important_image(time_image_list) older_page_text = older_image_page.text # And if the images are more than two? string = '' images_to_tag_list = [] for dup_page in duplicates: if dup_page == older_image_page: # the most used or oldest image # not report also this as duplicate continue try: dup_page_text = dup_page.text except NoPageError: continue if not (re.findall(dup_regex, dup_page_text) or re.findall(dup_regex, older_page_text)): pywikibot.output( '{} is a duplicate and has to be tagged...' .format(dup_page)) images_to_tag_list.append(dup_page.title()) string += '* {}\n'.format( dup_page.title(as_link=True, textlink=True)) else: pywikibot.output( "Already put the dupe-template in the files's page" " or in the dupe's page. 
Skip.") return False # Ok - Let's continue the checking phase # true if the image are not to be tagged as dupes only_report = False # put only one image or the whole list according to the request if '__images__' in dup_text: text_for_the_report = dup_text.replace( '__images__', '\n{}* {}\n'.format( string, older_image_page.title( as_link=True, textlink=True))) else: text_for_the_report = dup_text.replace( '__image__', older_image_page.title(as_link=True, textlink=True)) # Two iteration: report the "problem" to the user only once # (the last) if len(images_to_tag_list) > 1: for image_to_tag in images_to_tag_list[:-1]: fp = pywikibot.FilePage(self.site, image_to_tag) already_reported_in_past = fp.revision_count(self.bots) # if you want only one edit, the edit found should be # more than 0 -> num - 1 if already_reported_in_past > duplicates_rollback - 1: only_report = True break # Delete the image in the list where we're write on image = self.image_namespace + image_to_tag text_for_the_report = re.sub( r'\n\*\[\[:{}\]\]'.format(re.escape(image)), '', text_for_the_report) self.report(text_for_the_report, image_to_tag, comm_image=dup_comment_image, unver=True) if images_to_tag_list and not only_report: fp = pywikibot.FilePage(self.site, images_to_tag_list[-1]) already_reported_in_past = fp.revision_count(self.bots) image_title = re.escape(self.image.title(as_url=True)) from_regex = (r'\n\*\[\[:{}{}\]\]' .format(self.image_namespace, image_title)) # Delete the image in the list where we're write on text_for_the_report = re.sub(from_regex, '', text_for_the_report) # if you want only one edit, the edit found should be more # than 0 -> num - 1 if already_reported_in_past > duplicates_rollback - 1 or \ not dup_talk_text: only_report = True else: self.report( text_for_the_report, images_to_tag_list[-1], dup_talk_text % (older_image_page.title(with_ns=True), string), dup_talk_head, comm_talk=dup_comment_talk, comm_image=dup_comment_image, unver=True) if self.duplicates_report or only_report: if only_report: repme = ((self.list_entry + 'has the following duplicates ' "('''forced mode'''):") % self.image.title(as_url=True)) else: repme = ( (self.list_entry + 'has the following duplicates:') % self.image.title(as_url=True)) for dup_page in duplicates: if (dup_page.title(as_url=True) == self.image.title(as_url=True)): # the image itself, not report also this as duplicate continue repme += '\n** [[:{}{}]]'.format( self.image_namespace, dup_page.title(as_url=True)) result = self.report_image(self.image_name, self.rep_page, self.com, repme, addings=False) if not result: return True # If Errors, exit (but continue the check) if older_image_page.title() != self.image_name: # The image is a duplicate, it will be deleted. So skip the # check-part, useless return False return True # Ok - No problem. Let's continue the checking phase def report_image(self, image_to_report, rep_page=None, com=None, rep_text=None, addings: bool = True) -> bool: """Report the files to the report page when needed.""" rep_page = rep_page or self.rep_page com = com or self.com rep_text = rep_text or self.list_entry + '~~~~~' if addings: # Adding the name of the image in the report if not done already rep_text = rep_text % image_to_report another_page = pywikibot.Page(self.site, rep_page) try: text_get = another_page.get() except NoPageError: text_get = '' except IsRedirectPageError: text_get = another_page.getRedirectTarget().get() # Don't care for differences inside brackets. 
end = rep_text.find('(', max(0, rep_text.find(']]'))) if end < 0: end = None short_text = rep_text[rep_text.find('[['):end].strip() reported = True # Skip if the message is already there. if short_text in text_get: pywikibot.output('{} is already in the report page.' .format(image_to_report)) reported = False elif len(text_get) >= self.log_full_number: if self.log_full_error: raise LogIsFull( 'The log page ({}) is full! Please delete the old files ' 'reported.'.format(another_page.title())) pywikibot.output( 'The log page ({}) is full! Please delete the old files ' ' reported. Skip!'.format(another_page.title())) # Don't report, but continue with the check # (we don't know if this is the first time we check this file # or not) else: # Adding the log another_page.put(text_get + rep_text, summary=com, force=True, minor=False) pywikibot.output('...Reported...') return reported def takesettings(self) -> None: """Function to take the settings from the wiki.""" settings_page = i18n.translate(self.site, PAGE_WITH_SETTINGS) try: if not settings_page: self.settings_data = None else: page = pywikibot.Page(self.site, settings_page) self.settings_data = [] try: testo = page.get() number = 1 for m in SETTINGS_REGEX.finditer(testo): name = str(m.group(1)) find_tipe = str(m.group(2)) find = str(m.group(3)) imagechanges = str(m.group(4)) summary = str(m.group(5)) head = str(m.group(6)) text = str(m.group(7)) mexcatched = str(m.group(8)) tupla = [number, name, find_tipe, find, imagechanges, summary, head, text, mexcatched] self.settings_data += [tupla] number += 1 if not self.settings_data: pywikibot.output( "You've set wrongly your settings, please take a " 'look to the relative page. (run without them)') self.settings_data = None except NoPageError: pywikibot.output("The settings' page doesn't exist!") self.settings_data = None except Error: pywikibot.output( 'Problems with loading the settigs, run without them.') self.settings_data = None self.some_problem = False if not self.settings_data: self.settings_data = None # Real-Time page loaded if self.settings_data: pywikibot.output('>> Loaded the real-time page... <<') else: pywikibot.output('>> No additional settings found! 
<<') def load_licenses(self) -> List[pywikibot.Page]: """Load the list of the licenses.""" cat_name = i18n.translate(self.site, CATEGORY_WITH_LICENSES) if not cat_name: raise TranslationError( 'No allowed licenses category provided in ' '"CATEGORY_WITH_LICENSES" dict for your project!') pywikibot.output('\nLoading the allowed licenses...\n') cat = pywikibot.Category(self.site, cat_name) list_licenses = list(cat.articles()) if self.site.code == 'commons': no_licenses_to_skip = pywikibot.Category(self.site, 'License-related tags') for license_given in no_licenses_to_skip.articles(): if license_given in list_licenses: list_licenses.remove(license_given) pywikibot.output('') # Add the licenses set in the default page as licenses to check if self.page_allowed: try: page_allowed_text = pywikibot.Page(self.site, self.page_allowed).get() except (NoPageError, IsRedirectPageError): page_allowed_text = '' for name_license in self.load(page_allowed_text): page_license = pywikibot.Page(self.site, name_license) if page_license not in list_licenses: # the list has wiki-pages list_licenses.append(page_license) return list_licenses def mini_template_check(self, template) -> bool: """Check if template is in allowed licenses or in licenses to skip.""" # the list_licenses are loaded in the __init__ # (not to load them multimple times) if template in self.list_licenses: self.license_selected = template.title(with_ns=False) self.seems_ok = True # let the last "fake" license normally detected self.license_found = self.license_selected return True if template in self.hiddentemplates: # if the whitetemplate is not in the images description, we don't # care try: self.all_licenses.remove(template) except ValueError: return False else: self.white_templates_found = True return False def template_in_list(self) -> None: """ Check if template is in list. The problem is the calls to the Mediawiki system because they can be pretty slow. While searching in a list of objects is really fast, so first of all let's see if we can find something in the info that we already have, then make a deeper check. """ for template in self.licenses_found: if self.mini_template_check(template): break if not self.license_found: for template in self.licenses_found: if template.isRedirectPage(): template = template.getRedirectTarget() if self.mini_template_check(template): break def smart_detection(self) -> Tuple[str, bool]: """ Detect templates. The bot instead of checking if there's a simple template in the image's description, checks also if that template is a license or something else. In this sense this type of check is smart. """ self.seems_ok = False self.license_found = None self.white_templates_found = False regex_find_licenses = re.compile( r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)[|\n<}]', re.DOTALL) regex_are_licenses = re.compile( r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)\}\}', re.DOTALL) while True: self.load_hidden_templates() self.licenses_found = self.image.templates() templates_in_the_image_raw = regex_find_licenses.findall( self.image_check_text) if not self.licenses_found and templates_in_the_image_raw: # {{nameTemplate|something <- this is not a template, be sure # that we haven't catch something like that. 
licenses_test = regex_are_licenses.findall( self.image_check_text) if not self.licenses_found and licenses_test: raise Error( "Invalid or broken templates found in the image's " 'page {}!'.format(self.image)) self.all_licenses = [] if not self.list_licenses: raise TranslationError( 'No allowed licenses found in "CATEGORY_WITH_LICENSES" ' 'category for your project!') # Found the templates ONLY in the image's description for template_selected in templates_in_the_image_raw: tp = pywikibot.Page(self.site, template_selected) for template_real in self.licenses_found: if (tp.title(as_url=True, with_ns=False).lower() == template_real.title(as_url=True, with_ns=False).lower()): if template_real not in self.all_licenses: self.all_licenses.append(template_real) break if self.licenses_found: self.template_in_list() if not self.license_found and self.all_licenses: self.all_licenses = [ template.getRedirectTarget() if template.isRedirectPage() else template for template in self.all_licenses if template.exists()] if self.all_licenses: self.license_found = self.all_licenses[0].title() # If it has "some_problem" it must check the additional settings. self.some_problem = False if self.settings_data: # use additional settings self.find_additional_problems() if self.some_problem: if self.mex_used in self.image_check_text: pywikibot.output('File already fixed. Skipping.') else: pywikibot.output( "The file's description for {} contains {}..." .format(self.image_name, self.name_used)) if self.mex_used.lower() == 'default': self.mex_used = self.unvertext if self.imagestatus_used: reported = True else: reported = self.report_image(self.image_name) if reported: self.report(self.mex_used, self.image_name, self.text_used, self.head_used, None, self.imagestatus_used, self.summary_used) else: pywikibot.output('Skipping the file...') self.some_problem = False else: if not self.seems_ok and self.license_found: rep_text_license_fake = ((self.list_entry + "seems to have a ''fake license''," ' license detected:' ' <nowiki>%s</nowiki>') % (self.image_name, self.license_found)) print_with_time_zone( '{} seems to have a fake license: {}, reporting...' .format(self.image_name, self.license_found)) self.report_image(self.image_name, rep_text=rep_text_license_fake, addings=False) elif self.license_found: pywikibot.output('[[%s]] seems ok, license found: {{%s}}...' 
% (self.image_name, self.license_found)) return (self.license_found, self.white_templates_found) def load(self, raw) -> List[str]: """Load a list of objects from a string using regex.""" list_loaded = [] # I search with a regex how many user have not the talk page # and i put them in a list (i find it more easy and secure) regl = r"(\"|\')(.*?)\1(?:,|\])" pl = re.compile(regl) for xl in pl.finditer(raw): word = xl.group(2).replace('\\\\', '\\') if word not in list_loaded: list_loaded.append(word) return list_loaded def skip_images(self, skip_number, limit) -> bool: """Given a number of files, skip the first -number- files.""" # If the images to skip are more the images to check, make them the # same number if skip_number == 0: pywikibot.output('\t\t>> No files to skip...<<') return False skip_number = min(skip_number, limit) # Print a starting message only if no images has been skipped if not self.skip_list: pywikibot.output( i18n.translate( 'en', 'Skipping the first {{PLURAL:num|file|%(num)s files}}:\n', {'num': skip_number})) # If we still have pages to skip: if len(self.skip_list) < skip_number: pywikibot.output('Skipping {}...'.format(self.image_name)) self.skip_list.append(self.image_name) if skip_number == 1: pywikibot.output('') return True pywikibot.output('') return False @staticmethod def wait(generator, wait_time) -> Generator[pywikibot.FilePage, None, None]: """ Skip the images uploaded before x seconds. Let the users to fix the image's problem alone in the first x seconds. """ print_with_time_zone( 'Skipping the files uploaded less than {} seconds ago..' .format(wait_time)) for page in generator: image = pywikibot.FilePage(page) try: timestamp = image.latest_file_info.timestamp except PageRelatedError: continue now = pywikibot.Timestamp.utcnow() delta = now - timestamp if delta.total_seconds() > wait_time: yield image else: pywikibot.warning( 'Skipping {}, uploaded {} {} ago..' .format(image.title(), delta.days, 'days') if delta.days > 0 else (image.title(), delta.seconds, 'seconds')) def is_tagged(self) -> bool: """Understand if a file is already tagged or not.""" # TODO: enhance and use textlib.MultiTemplateMatchBuilder # Is the image already tagged? If yes, no need to double-check, skip no_license = i18n.translate(self.site, TXT_FIND) if not no_license: raise TranslationError( 'No no-license templates provided in "TXT_FIND" dict ' 'for your project!') for i in no_license: # If there are {{ use regex, otherwise no (if there's not the # {{ may not be a template and the regex will be wrong) if '{{' in i: regex_pattern = re.compile( r'\{\{(?:template)?%s ?(?:\||\r?\n|\}|<|/) ?' % i.split('{{')[1].replace(' ', '[ _]'), re.I) result = regex_pattern.findall(self.image_check_text) if result: return True elif i.lower() in self.image_check_text: return True return False def find_additional_problems(self) -> None: """Extract additional settings from configuration page.""" # In every tuple there's a setting configuration for tupla in self.settings_data: name = tupla[1] find_tipe = tupla[2] find = tupla[3] find_list = self.load(find) imagechanges = tupla[4] if imagechanges.lower() == 'false': imagestatus = False elif imagechanges.lower() == 'true': imagestatus = True else: pywikibot.error('Imagechanges set wrongly!') self.settings_data = None break summary = tupla[5] head_2 = tupla[6] if head_2.count('==') == 2: head_2 = re.findall(r'\s*== *(.+?) 
*==\s*', head_2)[0] text = tupla[7] % self.image_name mex_catched = tupla[8] for k in find_list: if find_tipe.lower() == 'findonly': search_results = re.findall(r'{}'.format(k.lower()), self.image_check_text.lower()) if search_results: if search_results[0] == self.image_check_text.lower(): self.some_problem = True self.text_used = text self.head_used = head_2 self.imagestatus_used = imagestatus self.name_used = name self.summary_used = summary self.mex_used = mex_catched break elif find_tipe.lower() == 'find': if re.findall(r'{}'.format(k.lower()), self.image_check_text.lower()): self.some_problem = True self.text_used = text self.head_used = head_2 self.imagestatus_used = imagestatus self.name_used = name self.summary_used = summary self.mex_used = mex_catched continue def check_step(self) -> None: """Check a single file page.""" # something = Minimal requirements for an image description. # If this fits, no tagging will take place # (if there aren't other issues) # MIT license is ok on italian wikipedia, let also this here # Don't put "}}" here, please. Useless and can give problems. something = ['{{'] # Allowed extensions try: allowed_formats = self.site.siteinfo.get( 'fileextensions', get_default=False) except KeyError: allowed_formats = [] else: allowed_formats = [item['ext'].lower() for item in allowed_formats] brackets = False delete = False notification = None # get the extension from the image's name extension = self.image_name.split('.')[-1] # Load the notification messages hidden_template_notification = i18n.translate( self.site, HIDDEN_TEMPALTE_NOTIFICATION) self.unvertext = i18n.translate(self.site, N_TXT) di = i18n.translate(self.site, DELETE_IMMEDIATELY) # The header of the Unknown extension's message. dih = i18n.twtranslate(self.site, 'checkimages-unknown-extension-head') # Text that will be add if the bot find a unknown extension. din = i18n.twtranslate(self.site, 'checkimages-unknown-extension-msg') + ' ~~~~' # Header that the bot will add if the image hasn't the license. nh = i18n.twtranslate(self.site, 'checkimages-no-license-head') # Summary of the delete immediately. dels = i18n.twtranslate(self.site, 'checkimages-deletion-comment') nn = i18n.translate(self.site, NOTHING_NOTIFICATION) smwl = i18n.translate(self.site, SECOND_MESSAGE_WITHOUT_LICENSE) try: self.image_check_text = self.image.get() except NoPageError: pywikibot.output('Skipping {} because it has been deleted.' .format(self.image_name)) return except IsRedirectPageError: pywikibot.output("Skipping {} because it's a redirect." .format(self.image_name)) return # Delete the fields where the templates cannot be loaded regex_nowiki = re.compile(r'<nowiki>(.*?)</nowiki>', re.DOTALL) regex_pre = re.compile(r'<pre>(.*?)</pre>', re.DOTALL) self.image_check_text = regex_nowiki.sub('', self.image_check_text) self.image_check_text = regex_pre.sub('', self.image_check_text) # Deleting the useless template from the description (before adding # sth in the image the original text will be reloaded, don't worry). if self.is_tagged(): print_with_time_zone('{} is already tagged.' .format(self.image_name)) return # something is the array with {{, MIT License and so on. for a_word in something: if a_word in self.image_check_text: # There's a template, probably a license brackets = True # Is the extension allowed? (is it an image or f.e. a .xls file?) if allowed_formats and extension.lower() not in allowed_formats: delete = True (license_found, hidden_template_found) = self.smart_detection() # Here begins the check block. 
if brackets and license_found: return if delete: pywikibot.output('{} is not a file!'.format(self.image_name)) if not di: pywikibot.output('No localized message given for ' "'DELETE_IMMEDIATELY'. Skipping.") return # Some formatting for delete immediately template dels = dels % {'adding': di} di = '\n' + di # Modify summary text config.default_edit_summary = dels canctext = di % extension notification = din % {'file': self.image.title(as_link=True, textlink=True)} head = dih self.report(canctext, self.image_name, notification, head) return if not self.image_check_text.strip(): # empty image description pywikibot.output( "The file's description for {} does not contain a license " ' template!'.format(self.image_name)) if hidden_template_found and hidden_template_notification: notification = hidden_template_notification % self.image_name elif nn: notification = nn % self.image_name head = nh self.report(self.unvertext, self.image_name, notification, head, smwl) return pywikibot.output('{} has only text and not the specific ' 'license...'.format(self.image_name)) if hidden_template_found and hidden_template_notification: notification = hidden_template_notification % self.image_name elif nn: notification = nn % self.image_name head = nh self.report(self.unvertext, self.image_name, notification, head, smwl) def main(*args: str) -> bool: """ Process command line arguments and invoke bot. If args is an empty list, sys.argv is used. :param args: command line arguments """ # Command line configurable parameters repeat = True # Restart after having check all the images? limit = 80 # How many images check? time_sleep = 30 # How many time sleep after the check? skip_number = 0 # How many images to skip before checking? wait_time = 0 # How many time sleep before the check? commons_active = False # Is there an image with the same name at commons? normal = False # Check the new images or use another generator? url_used = False # Use the url-related function instead of the new-pages regex_gen = False # Use the regex generator duplicates_active = False # Use the duplicate option duplicates_report = False # Use the duplicate-report option max_user_notify = None sendemail_active = False # Use the send-email log_full_error = True # Raise an error when the log is full generator = None unknown = [] # unknown parameters local_args = pywikibot.handle_args(args) site = pywikibot.Site() # Here below there are the local parameters. 
    for arg in local_args:
        option, _, value = arg.partition(':')
        if option == '-limit':
            limit = int(value or pywikibot.input(
                'How many files do you want to check?'))
        elif option == '-sleep':
            time_sleep = int(value or pywikibot.input(
                'How many seconds do you want runs to be apart?'))
        elif option == '-break':
            repeat = False
        elif option == '-nologerror':
            log_full_error = False
        elif option == '-commons':
            commons_active = True
        elif option == '-duplicatesreport':
            duplicates_report = True
        elif option == '-duplicates':
            duplicates_active = True
            duplicates_rollback = int(value or 1)
        elif option == '-maxusernotify':
            max_user_notify = int(value or pywikibot.input(
                'What should be the maximum number of notifications per user '
                'per check?'))
        elif option == '-sendemail':
            sendemail_active = True
        elif option == '-skip':
            skip_number = int(value or pywikibot.input(
                'How many files do you want to skip?'))
        elif option == '-wait':
            wait_time = int(value or pywikibot.input(
                'How long do you want to wait before checking the files?'))
        elif option == '-start':
            first_page_title = value or pywikibot.input(
                'From which page do you want to start?')
            namespaces = tuple(
                ns + ':' for ns in site.namespace(Namespace.FILE, True))
            if first_page_title.startswith(namespaces):
                first_page_title = first_page_title.split(':', 1)[1]
            generator = site.allimages(start=first_page_title)
            repeat = False
        elif option == '-page':
            regex_page_name = value or pywikibot.input(
                'Which page do you want to use for the regex?')
            repeat = False
            regex_gen = True
        elif option == '-url':
            regex_page_url = value or pywikibot.input(
                'Which URL do you want to use for the regex?')
            url_used = True
            repeat = False
            regex_gen = True
        elif option == '-regex':
            regexp_to_use = value or pywikibot.input(
                'Which regex do you want to use?')
            generator = 'regex'
            repeat = False
        elif option == '-cat':
            cat_name = value or pywikibot.input(
                'In which category do I work?')
            cat = pywikibot.Category(site, 'Category:' + cat_name)
            generator = cat.articles(namespaces=[6])
            repeat = False
        elif option == '-ref':
            ref_name = value or pywikibot.input(
                'References of which page should I parse?')
            ref = pywikibot.Page(site, ref_name)
            generator = ref.getReferences(namespaces=[6])
            repeat = False
        else:
            unknown.append(arg)

    if not generator:
        normal = True

    # Ensure that the bot is localized and that the right command-line
    # arguments are given.
    if site.code not in PROJECT_INSERTED:
        additional_text = ('Your project is not supported by this script.\n'
                           'To allow your project in the script you have to '
                           'add a localization into the script and add your '
                           'project to the "PROJECT_INSERTED" list!')
    else:
        additional_text = ''
    if suggest_help(unknown_parameters=unknown,
                    additional_text=additional_text):
        return False

    # Read the log of new images unless another generator was given.
    if normal:
        if limit == 1:
            pywikibot.output('Retrieving the latest file for checking...')
        else:
            pywikibot.output('Retrieving the latest {} files for checking...'
                             .format(limit))

    while True:
        # Instantiate the main bot class.
        bot = CheckImagesBot(site,
                             sendemail_active=sendemail_active,
                             duplicates_report=duplicates_report,
                             log_full_error=log_full_error,
                             max_user_notify=max_user_notify)

        if normal:
            generator = pg.NewimagesPageGenerator(total=limit, site=site)

        # If url_used and regex_gen, fetch the source text for the generator
        # from the URL.
        if url_used and regex_gen:
            text_regex = site.getUrl(regex_page_url, no_hostname=True)
        # Not a URL but a wiki page as the "source" for the regex
        elif regex_gen:
            page = pywikibot.Page(site, regex_page_name)
            try:
                text_regex = page.get()
            except NoPageError:
                pywikibot.output("{} doesn't exist!".format(page.title()))
                text_regex = ''  # No source, so the bot will quit later.

        # If the regex generator was requested, build our own generator from
        # the source text (URL or page) and the given regex.
        if generator == 'regex' and regex_gen:
            generator = bot.regex_generator(regexp_to_use, text_regex)

        bot.takesettings()
        if wait_time > 0:
            generator = bot.wait(generator, wait_time)

        for image in generator:
            # Set the image for the main class
            bot.set_parameters(image)

            if skip_number and bot.skip_images(skip_number, limit):
                continue

            # Check on Commons whether there is already an image with the
            # same name
            if commons_active and site.family.name != 'commons':
                if not bot.check_image_on_commons():
                    continue

            # Check whether there are duplicates of the image on the project
            if duplicates_active:
                if not bot.check_image_duplicated(duplicates_rollback):
                    continue

            bot.check_step()

        if repeat:
            pywikibot.output('Waiting for {} seconds.'.format(time_sleep))
            pywikibot.sleep(time_sleep)
        else:
            break
    return True


if __name__ == '__main__':
    start = time.time()
    ret = False
    try:
        ret = main()
    except KeyboardInterrupt:
        ret = True
    finally:
        if ret is not False:
            final = time.time()
            delta = int(final - start)
            pywikibot.output('Execution time: {} seconds\n'.format(delta))
wikimedia/pywikibot-core
scripts/checkimages.py
Python
mit
76,106
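The -page, -url and -regex options above hand a source text and a regular expression to the bot's own generator. As a rough sketch of that idea only (the actual CheckImagesBot.regex_generator is not shown here and may differ), a regex-driven generator that yields a FilePage for every captured title could look like this:

# Illustrative sketch, not the script's real regex_generator: yield a
# FilePage for every title the (assumed) capturing regex finds in the text.
import re

import pywikibot


def regex_file_generator(site, regexp, source_text):
    """Yield pywikibot.FilePage objects for every match of regexp."""
    for match in re.finditer(regexp, source_text):
        # Assume the first group (or the whole match) is the file title.
        title = match.group(1) if match.groups() else match.group(0)
        yield pywikibot.FilePage(site, title)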
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.generics import RetrieveAPIView

from jarbas.core.models import Company
from jarbas.core.serializers import CompanySerializer
from jarbas.chamber_of_deputies.serializers import format_cnpj


class CompanyDetailView(RetrieveAPIView):

    lookup_field = 'cnpj'
    queryset = Company.objects.all()
    serializer_class = CompanySerializer

    def get_object(self):
        cnpj = self.kwargs.get(self.lookup_field, '00000000000000')
        return get_object_or_404(Company, cnpj=format_cnpj(cnpj))


def healthcheck(request):
    """A simple view to run a health check in Django and in the database"""
    with connection.cursor() as cursor:
        cursor.execute('SELECT 1')
        cursor.fetchone()
    return HttpResponse()
datasciencebr/serenata-de-amor
jarbas/core/views.py
Python
mit
867
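For context, the two views above are typically exposed through a URL configuration. The wiring below is only an assumption for illustration; the route paths and names are invented here, and only the imported views come from the file above.

# Hypothetical urls.py sketch; paths and route names are assumptions.
from django.urls import path

from jarbas.core.views import CompanyDetailView, healthcheck

urlpatterns = [
    # lookup_field is 'cnpj', so the URL keyword argument must be named 'cnpj'
    path('company/<str:cnpj>/', CompanyDetailView.as_view(),
         name='company-detail'),
    path('healthcheck/', healthcheck, name='healthcheck'),
]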
#!/usr/bin/env python3

"""
Created on 17 Sep 2019

@author: Bruno Beloff ([email protected])
"""

import time

from scs_core.sys.timeout import Timeout


# --------------------------------------------------------------------------------------------------------------------
# run...

timeout = Timeout(5)
print(timeout)
print("-")

try:
    with timeout:
        time.sleep(10)
        print("slept")

except TimeoutError:
    print("TimeoutError")

finally:
    print("done")
south-coast-science/scs_core
tests/sys/timeout_test.py
Python
mit
494
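The test above expects Timeout to act as a context manager that raises TimeoutError once its deadline expires, interrupting the 10-second sleep. The real scs_core implementation is not shown here; as a purely illustrative sketch, a POSIX-only variant based on signal.alarm could look like this:

# Illustrative only: a SIGALRM-based timeout context manager (POSIX).
# The actual scs_core.sys.timeout.Timeout may be implemented differently.
import signal


class AlarmTimeout:
    def __init__(self, seconds):
        self.seconds = int(seconds)

    def _handle(self, signum, frame):
        raise TimeoutError('timed out after %d seconds' % self.seconds)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._handle)
        signal.alarm(self.seconds)      # arm the alarm
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        signal.alarm(0)                 # disarm on exit
        return False                    # never suppress exceptions

Used in the same way as the test above, the sleep would be interrupted after five seconds and the TimeoutError would reach the except block.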
class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        O(logn)
        """
        low = 0
        high = len(nums) - 1
        if target <= nums[low]:
            return low
        if target > nums[high]:
            return high + 1
        while low < high:
            mid = (low + high) // 2
            # print(low, high, mid)
            if nums[mid] < target <= nums[mid + 1]:
                return mid + 1
            if nums[mid] >= target:
                high = mid
            else:
                low = mid

    def searchInsert1(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        60ms O(n)
        """
        index = 0
        for num in nums:
            if num < target:
                index += 1
        return index


if __name__ == '__main__':
    target = 7
    nums = [1, 3, 5, 6]
    # nums = [1]
    print(Solution().searchInsert(nums, target))
comicxmz001/LeetCode
Python/35. Search Insert Position.py
Python
mit
973
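The O(log n) method above hand-rolls the binary search for the insertion point. As a cross-check when testing, the same index can be obtained from the standard library; the small sketch below is an assumed-equivalent alternative, not part of the original solution.

# Standard-library cross-check: bisect_left returns the left-most insertion
# point, which is exactly the index the problem asks for.
from bisect import bisect_left


def search_insert(nums, target):
    return bisect_left(nums, target)


if __name__ == '__main__':
    print(search_insert([1, 3, 5, 6], 7))   # 4
    print(search_insert([1, 3, 5, 6], 5))   # 2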