repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
skython/eXe | twisted/internet/iocpreactor/process.py | 14 | 15266 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Support for IReactorProcess for the IOCP proactor.
API Stability: unstable
Maintainer: U{Justin Johnson<mailto:[email protected]>}
This code is potentially unstable. I have performed numerous tests,
but couldn't get someone knowledgeable about win32 to review it.
If you run into problems please submit a bug report to
http://twistedmatrix.com/bugs.
"""
# Win32 imports
import win32api
import win32gui
import win32con
import win32file
import win32pipe
import win32process
import win32security
from win32event import CreateEvent, SetEvent, WaitForSingleObject
from win32event import MsgWaitForMultipleObjects, WAIT_OBJECT_0
from win32event import WAIT_TIMEOUT, INFINITE, QS_ALLINPUT, QS_POSTMESSAGE
from win32event import QS_ALLEVENTS
# Zope & Twisted imports
from zope.interface import implements
from twisted.internet import error
from twisted.python import failure, components
from twisted.python.win32 import cmdLineQuote
from twisted.internet.interfaces import IProcessTransport, IConsumer
# sibling imports
import ops
import process_waiter
# System imports
import os
import sys
import time
import itertools
# Counter for uniquely identifying pipes
counter = itertools.count(1)
class Process(object):
"""A process that integrates with the Twisted event loop.
See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dllproc/base/creating_a_child_process_with_redirected_input_and_output.asp
for more info on how to create processes in Windows and access their
stdout/err/in. Another good source is http://www.informit.com/articles/article.asp?p=362660&seqNum=2.
Issues:
If your subprocess is a python program, you need to:
- Run python.exe with the '-u' command line option - this turns on
unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
- (is this still true?) If you don't want Windows messing with data passed over
stdin/out/err, set the pipes to be in binary mode::
import os, sys, msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
"""
implements(IProcessTransport, IConsumer)
# I used this size because abstract.ConnectedSocket did. I don't
# know why though.
bufferSize = 2**2**2**2
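# (2**2**2**2 is right-associative, i.e. 2**(2**(2**2)) == 2**16 == 65536)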
# Per http://www-128.ibm.com/developerworks/linux/library/l-rt4/,
# an extra 24 bytes are needed to handle write header. I haven't seen
# any problems not having the extra 24 bytes though, so I'm not
# adding it to the size. I comment here just in case it is discovered
# to be necessary down the road.
pipeBufferSize = bufferSize
def __init__(self, reactor, protocol, command, args, environment, path):
self.reactor = reactor
self.protocol = protocol
self.outBuffer = reactor.AllocateReadBuffer(self.bufferSize)
self.errBuffer = reactor.AllocateReadBuffer(self.bufferSize)
# This is the buffer for *reading* stdin, which is only done to
# determine if the other end of the pipe was closed.
self.inBuffer = reactor.AllocateReadBuffer(self.bufferSize)
# IO operation classes
self.readOutOp = ops.ReadOutOp(self)
self.readErrOp = ops.ReadErrOp(self)
self.readInOp = ops.ReadInOp(self)
self.writeInOp = ops.WriteInOp(self)
self.writeBuffer = ""
self.writing = False
self.finished = False
self.offset = 0
self.writeBufferedSize = 0
self.closingStdin = False
self.closedStdin = False
self.closedStdout = False
self.closedStderr = False
# Stdio handles
self.hChildStdinRd = None
self.hChildStdinWr = None
self.hChildStdinWrDup = None
self.hChildStdoutRd = None
self.hChildStdoutWr = None
self.hChildStdoutRdDup = None
self.hChildStderrRd = None
self.hChildStderrWr = None
self.hChildStderrRdDup = None
self.closedNotifies = 0 # increments to 3 (for stdin, stdout, stderr)
self.closed = False # set to true when all 3 handles close
self.exited = False # set to true when WFMO thread gets signalled proc handle. See doWaitForProcessExit.
# Set the bInheritHandle flag so pipe handles are inherited.
saAttr = win32security.SECURITY_ATTRIBUTES()
saAttr.bInheritHandle = 1
currentPid = win32api.GetCurrentProcess() # pseudo-handle (-1) for the current process
self.pid = os.getpid() # unique pid for pipe naming
# Create a pipe for the child process's STDOUT.
self.stdoutPipeName = r"\\.\pipe\twisted-iocp-stdout-%d-%d-%d" % (self.pid, counter.next(), time.time())
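# e.g. for pid 1234 the first pipe name looks like
# \\.\pipe\twisted-iocp-stdout-1234-1-1300000000 (illustrative values;
# %d truncates the time.time() float to whole seconds)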
self.hChildStdoutRd = win32pipe.CreateNamedPipe(
self.stdoutPipeName,
win32con.PIPE_ACCESS_INBOUND | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStdoutWr = win32file.CreateFile(
self.stdoutPipeName,
win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
0)
# Create noninheritable read handle and close the inheritable read
# handle.
self.hChildStdoutRdDup = win32api.DuplicateHandle(
currentPid, self.hChildStdoutRd,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
win32api.CloseHandle(self.hChildStdoutRd)
self.hChildStdoutRd = self.hChildStdoutRdDup
# Create a pipe for the child process's STDERR.
self.stderrPipeName = r"\\.\pipe\twisted-iocp-stderr-%d-%d-%d" % (self.pid, counter.next(), time.time())
self.hChildStderrRd = win32pipe.CreateNamedPipe(
self.stderrPipeName,
win32con.PIPE_ACCESS_INBOUND | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStderrWr = win32file.CreateFile(
self.stderrPipeName,
win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
0)
# Create noninheritable read handle and close the inheritable read
# handle.
self.hChildStderrRdDup = win32api.DuplicateHandle(
currentPid, self.hChildStderrRd,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
win32api.CloseHandle(self.hChildStderrRd)
self.hChildStderrRd = self.hChildStderrRdDup
# Create a pipe for the child process's STDIN. This one is opened
# in duplex mode so we can read from it too in order to detect when
# the child closes their end of the pipe.
self.stdinPipeName = r"\\.\pipe\twisted-iocp-stdin-%d-%d-%d" % (self.pid, counter.next(), time.time())
self.hChildStdinWr = win32pipe.CreateNamedPipe(
self.stdinPipeName,
win32con.PIPE_ACCESS_DUPLEX | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStdinRd = win32file.CreateFile(
self.stdinPipeName,
win32con.GENERIC_READ,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
0)
# Duplicate the write handle to the pipe so it is not inherited.
self.hChildStdinWrDup = win32api.DuplicateHandle(
currentPid, self.hChildStdinWr,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
win32api.CloseHandle(self.hChildStdinWr)
self.hChildStdinWr = self.hChildStdinWrDup
# set the info structure for the new process. This is where
# we tell the process to use the pipes for stdout/err/in.
StartupInfo = win32process.STARTUPINFO()
StartupInfo.hStdOutput = self.hChildStdoutWr
StartupInfo.hStdError = self.hChildStderrWr
StartupInfo.hStdInput = self.hChildStdinRd
StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
# create the process
cmdline = ' '.join([cmdLineQuote(a) for a in args])
self.hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(
command, # name
cmdline, # command line
None, # process security attributes
None, # primary thread security attributes
1, # handles are inherited
0, # creation flags
environment, # if NULL, use parent environment
path, # current directory
StartupInfo) # STARTUPINFO pointer
# close handles which only the child will use
win32file.CloseHandle(self.hChildStderrWr)
win32file.CloseHandle(self.hChildStdoutWr)
win32file.CloseHandle(self.hChildStdinRd)
# Begin reading on stdout and stderr, before we have output on them.
self.readOutOp.initiateOp(self.hChildStdoutRd, self.outBuffer)
self.readErrOp.initiateOp(self.hChildStderrRd, self.errBuffer)
# Read stdin which was opened in duplex mode so we can detect when
# the child closed their end of the pipe.
self.readInOp.initiateOp(self.hChildStdinWr, self.inBuffer)
# When the process is done, call connectionLost().
# This function returns right away. Note I call this after
# protocol.makeConnection to ensure that the protocol doesn't
# have processEnded called before protocol.makeConnection.
self.reactor.processWaiter.beginWait(self.reactor, self.hProcess, self)
# notify protocol by calling protocol.makeConnection and specifying
# ourself as the transport.
self.protocol.makeConnection(self)
def signalProcess(self, signalID):
if signalID in ("INT", "TERM", "KILL"):
win32process.TerminateProcess(self.hProcess, 1)
def startWriting(self):
if not self.writing:
self.writing = True
b = buffer(self.writeBuffer, self.offset, self.bufferSize) # buffer() takes (object, offset, size)
self.writeInOp.initiateOp(self.hChildStdinWr, b)
def stopWriting(self):
self.writing = False
def writeDone(self, bytes):
self.writing = False
self.offset += bytes
self.writeBufferedSize -= bytes
if self.offset == len(self.writeBuffer):
self.writeBuffer = ""
self.offset = 0
if self.writeBuffer == "":
self.writing = False
# If there's nothing else to write and we're closing,
# do it now.
if self.closingStdin:
self._closeStdin()
self.connectionLostNotify()
else:
self.startWriting()
def write(self, data):
"""Write data to the process' stdin."""
self.writeBuffer += data
self.writeBufferedSize += len(data)
if not self.writing:
self.startWriting()
def writeSequence(self, seq):
"""Write a list of strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single string.
"""
self.write("".join(seq))
def closeStdin(self):
"""Close the process' stdin."""
if not self.closingStdin:
self.closingStdin = True
if not self.writing:
self._closeStdin()
self.connectionLostNotify()
def _closeStdin(self):
if hasattr(self, "hChildStdinWr"):
win32file.CloseHandle(self.hChildStdinWr)
del self.hChildStdinWr
self.closingStdin = False
self.closedStdin = True
def closeStderr(self):
if hasattr(self, "hChildStderrRd"):
win32file.CloseHandle(self.hChildStderrRd)
del self.hChildStderrRd
self.closedStderr = True
self.connectionLostNotify()
def closeStdout(self):
if hasattr(self, "hChildStdoutRd"):
win32file.CloseHandle(self.hChildStdoutRd)
del self.hChildStdoutRd
self.closedStdout = True
self.connectionLostNotify()
def loseConnection(self):
"""Close the process' stdout, in and err."""
self.closeStdin()
self.closeStdout()
self.closeStderr()
def outConnectionLost(self):
self.closeStdout() # in case process closed it, not us
self.protocol.outConnectionLost()
def errConnectionLost(self):
self.closeStderr() # in case process closed it
self.protocol.errConnectionLost()
def inConnectionLost(self):
self._closeStdin()
self.protocol.inConnectionLost()
self.connectionLostNotify()
def connectionLostNotify(self):
"""Will be called 3 times, for stdout/err/in."""
self.closedNotifies = self.closedNotifies + 1
if self.closedNotifies == 3:
self.closed = True
if self.exited:
self.connectionLost()
def processEnded(self):
self.exited = True
# If all 3 stdio handles are closed, call connectionLost
if self.closed:
self.connectionLost()
def connectionLost(self, reason=None):
"""Shut down resources."""
# Get the exit status and notify the protocol
exitCode = win32process.GetExitCodeProcess(self.hProcess)
if exitCode == 0:
err = error.ProcessDone(exitCode)
else:
err = error.ProcessTerminated(exitCode)
self.protocol.processEnded(failure.Failure(err))
## IConsumer
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
components.backwardsCompatImplements(Process)
| gpl-2.0 |
endlessm/chromium-browser | third_party/chromite/cli/cros/cros_deploy_unittest.py | 1 | 3712 | # -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module tests the cros deploy command."""
from __future__ import print_function
import sys
from chromite.cli import command_unittest
from chromite.cli import deploy
from chromite.cli.cros import cros_deploy
from chromite.lib import commandline
from chromite.lib import cros_test_lib
from chromite.lib import remote_access
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class MockDeployCommand(command_unittest.MockCommand):
"""Mock out the deploy command."""
TARGET = 'chromite.cli.cros.cros_deploy.DeployCommand'
TARGET_CLASS = cros_deploy.DeployCommand
COMMAND = 'deploy'
def __init__(self, *args, **kwargs):
command_unittest.MockCommand.__init__(self, *args, **kwargs)
def Run(self, inst):
command_unittest.MockCommand.Run(self, inst)
class CrosDeployTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.OutputTestCase):
"""Test calling `cros deploy` with various arguments.
These tests just check that arguments as specified on the command
line are properly passed through to deploy. Testing the
actual update flow should be done in the deploy unit tests.
"""
DEVICE = remote_access.TEST_IP
PACKAGES = ['foo', 'bar']
def SetupCommandMock(self, cmd_args):
"""Setup comand mock."""
self.cmd_mock = MockDeployCommand(
cmd_args, base_args=['--cache-dir', self.tempdir])
self.StartPatcher(self.cmd_mock)
def setUp(self):
"""Patches objects."""
self.cmd_mock = None
self.deploy_mock = self.PatchObject(deploy, 'Deploy', autospec=True)
self.run_inside_chroot_mock = self.PatchObject(
commandline, 'RunInsideChroot', autospec=True)
def VerifyDeployParameters(self, device, packages, **kwargs):
"""Verifies the arguments passed to Deployer.Run().
This function helps verify that command line specifications are
parsed properly.
Args:
device: expected device hostname.
packages: expected packages list.
kwargs: keyword arguments expected in the call to Deployer.Run().
Arguments unspecified here are checked against their default
value for `cros deploy`.
"""
deploy_args, deploy_kwargs = self.deploy_mock.call_args
self.assertEqual(device, deploy_args[0].hostname)
self.assertListEqual(packages, deploy_args[1])
# `cros deploy` default options. Must match AddParser().
expected_kwargs = {
'board': None,
'strip': True,
'emerge': True,
'root': '/',
'clean_binpkg': True,
'emerge_args': None,
'ssh_private_key': None,
'ping': True,
'dry_run': False,
'force': False,
'update': False,
'deep': False,
'deep_rev': False}
# Overwrite defaults with any variations in this test.
expected_kwargs.update(kwargs)
self.assertDictEqual(expected_kwargs, deploy_kwargs)
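# An overriding test would pass the changed value explicitly, e.g. (with a
# hypothetical board name):
# self.VerifyDeployParameters(self.DEVICE, self.PACKAGES, board='amd64-generic')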
def testDefaults(self):
"""Tests `cros deploy` default values."""
self.SetupCommandMock([self.DEVICE] + self.PACKAGES)
self.cmd_mock.inst.Run()
self.assertTrue(self.run_inside_chroot_mock.called)
self.VerifyDeployParameters(self.DEVICE, self.PACKAGES)
def testDeployError(self):
"""Tests that DeployErrors are passed through."""
with self.OutputCapturer():
self.SetupCommandMock([self.DEVICE] + self.PACKAGES)
self.deploy_mock.side_effect = deploy.DeployError
with self.assertRaises(deploy.DeployError):
self.cmd_mock.inst.Run()
| bsd-3-clause |
msteinhoff/foption-bot | src/python/interaction/irc/commands.py | 1 | 42748 | # -*- coding: UTF-8 -*-
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Jan 14, 2011
@author Mario Steinhoff
"""
__version__ = '$Rev$'
list = [
'Nick', 'User', 'Mode', 'Quit',
'Join', 'Part', 'Topic', 'Names', 'Invite', 'Kick',
'Privmsg', 'Notice',
'Motd', 'Who', 'Whois',
'Ping', 'Pong',
'WelcomeReply', 'YourHostReply', 'CreatedReply', 'MyInfoReply',
'BounceReply',
'MotdStartReply', 'MotdReply', 'MotdEndReply',
'AwayReply', 'UniqueOpIsReply', 'ChannelModeIsReply', 'InvitingReply',
'TopicReply', 'NoTopicReply',
'WhoisUserReply', 'WhoisServerReply', 'WhoisOperatorReply',
'WhoisIdleReply', 'WhoisChannelsReply', 'quakenet.WhoisAuthReply',
'WhoisEndReply',
'WhoReply', 'WhoEndReply',
'NamesReply', 'NamesEndReply',
'BanListReply', 'BanListEndReply',
'InviteListReply', 'InviteListEndReply',
'ExceptListReply', 'ExceptListEndReply',
'NoSuchServerError', 'TooManyTargetsError', 'NoOriginError',
'NoRecipientError', 'NoTextToSendError', 'NoToplevelError',
'WildTopLevelError', 'NoMotdError', 'UnavailableResourceError',
'NeedMoreParamsError', 'AlreadyRegisteredError', 'UnknownModeError',
'RestrictedError', 'UsersDontMatchError',
'NoSuchNickError', 'NoNicknameGivenError', 'ErroneusNicknameError',
'NicknameInUseError', 'NickCollisionError',
'NoSuchChannelError', 'KeySetError', 'ChannelIsFullError',
'InviteOnlyChannelError', 'BannedFromChannelError', 'BadChannelKeyError',
'BadChannelMaskError', 'NoChannelModesError', 'CannotSendToChannelError',
'TooManyChannelsError', 'UserNotInChannelError', 'NotOnChannelError',
'UserOnChannelError', 'ChanOpPrivilegesNeededError',
]
import string
import random
from core.bot import BotError
from interaction.irc.message import Event
#-------------------------------------------------------------------------------
# Exceptions
#-------------------------------------------------------------------------------
class CommandError(BotError): pass
class MissingArgumentError(CommandError): pass
#-------------------------------------------------------------------------------
# Business Logic
#-------------------------------------------------------------------------------
class Command(object):
"""
High-level API for IRC commands.
"""
def __init__(self, client):
self.client = client
def get_receiver(self):
return self.Receiver(self.client)
def get_sender(self):
return self.Sender(self.client)
class Receiver(object):
"""
IRC command receiver.
Respond to incoming IRC events and dispatch them to all
registered listeners.
"""
def __init__(self, client):
"""
Initialize the receiver object.
@param client: The IRC client instance.
"""
self._listener = []
self.client = client
def add_listener(self, callback):
"""
Add a listener to the receiver instance.
The callback function is called everytime a receive event
occured.
@param callback: A pointer to the callback function.
"""
self._listener.append(callback)
def receive(self, event):
"""
Push a receive event to the command handler.
This will first call the internal command logic and then notice
additional listeners about the event. The event itself can be
modified at any time, altough this is not encouraged.
@param event: The event object.
"""
self._receive(event)
for callback in self._listener:
callback(event)
def _receive(self, event):
"""
Implement general command logic for receive events.
This method can be overriden in sub-classes to implement
module-independent logic.
@param event: The event.
"""
pass
class Sender(object):
def __init__(self, client):
self.client = client
def check_attr(self, attr):
if not hasattr(self, attr):
raise MissingArgumentError(attr)
def create_event(self, container, parameters):
return Event(None, container.token, parameters)
def send(self):
"""
Push a send event to the command handler.
This enables a high-level API for IRC commands. Each command
handler can define class attributes for clean user input and
format input data according to the IRC specifications.
"""
self.client.send_event(self._send())
def _send(self):
"""
Implement general command logic for receive events.
@return An event to send.
"""
pass
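# Illustrative sender usage (a sketch; it assumes a connected `client`
# exposing get_command(), as the receivers below already rely on):
#
#     privmsg = client.get_command('Privmsg').get_sender()
#     privmsg.target = '#channel'
#     privmsg.text = 'hello world'
#     privmsg.send()
#
# send() invokes _send(), which validates attributes via check_attr() and
# returns the Event that the client serializes onto the wire.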
"""-------------------------------------------------------------------------
Section: 3.1 Connection Registration
----------------------------------------------------------------------------
The commands described here are used to register a connection with an
IRC server as a user as well as to correctly disconnect.
A "PASS" command is not required for a client connection to be
registered, but it MUST precede the latter of the NICK/USER
combination (for a user connection) or the SERVICE command (for a
service connection). The RECOMMENDED order for a client to register
is as follows:
1. Pass message
2. Nick message 2. Service message
3. User message
Upon success, the client will receive an RPL_WELCOME (for users) or
RPL_YOURESERVICE (for services) message indicating that the
connection is now registered and known to the entire IRC network.
The reply message MUST contain the full client identifier upon which
it was registered.
----------------------------------------------------------------------------
3.1.1 Password message .................................. 10 - not needed
3.1.2 Nick message ...................................... 10 - needed
3.1.3 User message ...................................... 11 - needed
3.1.4 Oper message ...................................... 12 - not needed
3.1.5 User mode message ................................. 12 - needed
3.1.6 Service message ................................... 13 - not needed
3.1.7 Quit .............................................. 14 - needed
3.1.8 Squit ............................................. 15 - not needed
-------------------------------------------------------------------------"""
class Nick(Command):
"""
Command: NICK
Parameters: <nickname>
NICK command is used to give user a nickname or change the existing
one.
Numeric Replies:
ERR_NONICKNAMEGIVEN ERR_ERRONEUSNICKNAME
ERR_NICKNAMEINUSE ERR_NICKCOLLISION
ERR_UNAVAILRESOURCE ERR_RESTRICTED
"""
token = 'NICK'
class Receiver(Command.Receiver):
def _receive(self, event):
"""
Update the client's identity with the current nickname.
"""
if event.source.nickname == self.client.me.source.nickname:
self.client.me.rename(event.parameter[0])
class Sender(Command.Sender):
def _send(self):
"""
Send a request to set/change the client's nickname.
"""
self.check_attr('nickname')
return self.create_event(Nick, [self.nickname])
class User(Command):
"""
Command: USER
Parameters: <user> <mode> <unused> <realname>
The USER command is used at the beginning of connection to specify
the username, hostname and realname of a new user.
The <mode> parameter should be a numeric, and can be used to
automatically set user modes when registering with the server. This
parameter is a bitmask, with only 2 bits having any signification: if
the bit 2 is set, the user mode 'w' will be set and if the bit 3 is
set, the user mode 'i' will be set. (See Section 3.1.5 "User
Modes").
The <realname> may contain space characters.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_ALREADYREGISTRED
"""
token = 'USER'
class Sender(Command.Sender):
def _send(self):
"""
Register with the IRC server.
"""
self.check_attr('ident')
self.check_attr('realname')
return self.create_event(User, [self.ident, '0', '*', '{0}'.format(self.realname)])
class Mode(Command):
"""
Because user mode message and channel mode are using the same command,
user mode and channel mode logic are implemented in the same class at
the user section.
Command: MODE
Parameters: <nickname>
*( ( "+" / "-" ) *( "i" / "w" / "o" / "O" / "r" ) )
The user MODE's are typically changes which affect either how the
client is seen by others or what 'extra' messages the client is sent.
[...] If no other parameter is given, then the server will return
the current settings for the nick.
The available modes are as follows:
a - user is flagged as away;
i - marks a users as invisible;
w - user receives wallops;
r - restricted user connection;
o - operator flag;
O - local operator flag;
s - marks a user for receipt of server notices.
Additional modes may be available later on.
[...]
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_USERSDONTMATCH
ERR_UMODEUNKNOWNFLAG RPL_UMODEIS
[...]
Command: MODE
Parameters: <channel> *( ( "-" / "+" ) *<modes> *<modeparams> )
The MODE command is provided so that users may query and change the
characteristics of a channel. For more details on available modes
and their uses, see "Internet Relay Chat: Channel Management" [IRC-
CHAN]. Note that there is a maximum limit of three (3) changes per
command for modes that take a parameter.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_KEYSET
ERR_NOCHANMODES ERR_CHANOPRIVSNEEDED
ERR_USERNOTINCHANNEL ERR_UNKNOWNMODE
RPL_CHANNELMODEIS
RPL_BANLIST RPL_ENDOFBANLIST
RPL_EXCEPTLIST RPL_ENDOFEXCEPTLIST
RPL_INVITELIST RPL_ENDOFINVITELIST
RPL_UNIQOPIS
"""
token = 'MODE'
class Receiver(Command.Receiver):
def _receive(self, event):
pass
class Sender(Command.Sender):
def _send(self):
pass
class Quit(Command):
"""
3.1.7 Quit
Command: QUIT
Parameters: [ <Quit Message> ]
A client session is terminated with a quit message. The server
acknowledges this by sending an ERROR message to the client.
Numeric Replies:
None.
Example:
QUIT :Gone to have lunch ; Preferred message format.
:[email protected] QUIT :Gone to have lunch ; User
syrk has quit IRC to have lunch.
"""
token = 'QUIT'
class Receiver(Command.Receiver):
def _receive(self, event):
pass
class Sender(Command.Sender):
def _send(self):
"""
Send a quit command with a optional quit message.
@param message: The quit message.
"""
parameter = []
if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Quit, parameter)
"""
----------------------------------------------------------------------------
Section: 3.2 Channel operations
----------------------------------------------------------------------------
This group of messages is concerned with manipulating channels, their
properties (channel modes), and their contents (typically users).
For this reason, these messages SHALL NOT be made available to
services.
All of these messages are requests which will or will not be granted
by the server. The server MUST send a reply informing the user
whether the request was granted, denied or generated an error. When
the server grants the request, the message is typically sent back
(eventually reformatted) to the user with the prefix set to the user
itself.
----------------------------------------------------------------------------
3.2.1 Join message ...................................... 16 - needed
3.2.2 Part message ...................................... 17 - needed
3.2.3 Channel mode message .............................. 18 - needed
3.2.4 Topic message ..................................... 19 - needed
3.2.5 Names message ..................................... 20 - needed
3.2.6 List message ...................................... 21 - not needed
3.2.7 Invite message .................................... 21 - not needed (maybe implemented in the future)
3.2.8 Kick command ...................................... 22 - needed
-------------------------------------------------------------------------"""
class Join(Command):
"""
Command: JOIN
Parameters: ( <channel> *( "," <channel> ) [ <key> *( "," <key> ) ] )
/ "0"
The JOIN command is used by a user to request to start listening to
the specific channel. Servers MUST be able to parse arguments in the
form of a list of target, but SHOULD NOT use lists when sending JOIN
messages to clients.
Once a user has joined a channel, he receives information about
all commands his server receives affecting the channel. This
includes JOIN, MODE, KICK, PART, QUIT and of course PRIVMSG/NOTICE.
This allows channel members to keep track of the other channel
members, as well as channel modes.
If a JOIN is successful, the user receives a JOIN message as
confirmation and is then sent the channel's topic (using RPL_TOPIC) and
the list of users who are on the channel (using RPL_NAMREPLY), which
MUST include the user joining.
[...]
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_BANNEDFROMCHAN
ERR_INVITEONLYCHAN ERR_BADCHANNELKEY
ERR_CHANNELISFULL ERR_BADCHANMASK
ERR_NOSUCHCHANNEL ERR_TOOMANYCHANNELS
ERR_TOOMANYTARGETS ERR_UNAVAILRESOURCE
RPL_TOPIC
"""
token = 'JOIN'
class Sender(Command.Sender):
def _send(self):
"""
Join a channel.
@param channels: The channel names.
@param keys: The optional channel keys.
"""
if getattr(self, 'channels', None) is None:
parameter = ['0']
else:
parameter = [','.join(self.channels)]
if hasattr(self, 'keys') and self.keys is not None:
parameter.append(','.join(self.keys))
return self.create_event(Join, parameter)
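# Illustrative Join usage (sketch; channel and key values are made up):
#
#     join = client.get_command('Join').get_sender()
#     join.channels = ['#foo', '#bar']
#     join.keys = ['barkey']
#     join.send()
#
# yields an Event with token 'JOIN' and parameters ['#foo,#bar', 'barkey'].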
class Part(Command):
"""
Command: PART
Parameters: <channel> *( "," <channel> ) [ <Part Message> ]
The PART command causes the user sending the message to be removed
from the list of active members for all given channels listed in the
parameter string. If a "Part Message" is given, this will be sent
instead of the default message, the nickname. This request is always
granted by the server.
Servers MUST be able to parse arguments in the form of a list of
target, but SHOULD NOT use lists when sending PART messages to
clients.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHCHANNEL
ERR_NOTONCHANNEL
"""
token = 'PART'
class Sender(Command.Sender):
def _send(self):
"""
Part a channel.
@param channel: The channel name.
@param message: The optional part message.
"""
self.check_attr('channel')
parameter = [self.channel]
if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Part, parameter)
class Topic(Command):
"""
Command: TOPIC
Parameters: <channel> [ <topic> ]
The TOPIC command is used to change or view the topic of a channel.
The topic for channel <channel> is returned if there is no <topic>
given. If the <topic> parameter is present, the topic for that
channel will be changed, if this action is allowed for the user
requesting it. If the <topic> parameter is an empty string, the
topic for that channel will be removed.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOTONCHANNEL
RPL_NOTOPIC RPL_TOPIC
ERR_CHANOPRIVSNEEDED ERR_NOCHANMODES
"""
token = 'TOPIC'
class Sender(Command.Sender):
def _send(self):
"""
Get/set a channels topic.
"""
self.check_attr('channel')
parameter = [self.channel]
if hasattr(self, 'topic') and self.topic is not None:
parameter.append(self.topic)
return self.create_event(Topic, parameter)
class Names(Command):
"""
Command: NAMES
Parameters: [ <channel> *( "," <channel> ) [ <target> ] ]
By using the NAMES command, a user can list all nicknames that are
visible to him. For more details on what is visible and what is not,
see "Internet Relay Chat: Channel Management" [IRC-CHAN]. The
<channel> parameter specifies which channel(s) to return information
about. There is no error reply for bad channel names.
If no <channel> parameter is given, a list of all channels and their
occupants is returned. At the end of this list, a list of users who
are visible but either not on any channel or not on a visible channel
are listed as being on `channel' "*".
If the <target> parameter is specified, the request is forwarded to
that server which will generate the reply.
Wildcards are allowed in the <target> parameter.
Numerics:
ERR_TOOMANYMATCHES ERR_NOSUCHSERVER
RPL_NAMREPLY RPL_ENDOFNAMES
"""
token = 'NAMES'
class Sender(Command.Sender):
def _send(self):
"""
Request a NAMES list.
"""
self.check_attr('channels')
return self.create_event(Names, [','.join(self.channels)])
class Invite(Command):
"""
Command: INVITE
Parameters: <nickname> <channel>
The INVITE command is used to invite a user to a channel. The
parameter <nickname> is the nickname of the person to be invited to
the target channel <channel>. There is no requirement that the
channel the target user is being invited to must exist or be a valid
channel. However, if the channel exists, only members of the channel
are allowed to invite other users. When the channel has invite-only
flag set, only channel operators may issue INVITE command.
Only the user inviting and the user being invited will receive
notification of the invitation. Other channel members are not
notified. (This is unlike the MODE changes, and is occasionally the
source of trouble for users.)
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHNICK
ERR_NOTONCHANNEL ERR_USERONCHANNEL
ERR_CHANOPRIVSNEEDED
RPL_INVITING RPL_AWAY
"""
token = 'INVITE'
class Sender(Command.Sender):
def _send(self):
self.check_attr('nickname')
self.check_attr('channel')
return self.create_event(Invite, [self.nickname, self.channel])
class Kick(Command):
"""
Command: KICK
Parameters: <channel> *( "," <channel> ) <user> *( "," <user> )
[<comment>]
The KICK command can be used to request the forced removal of a user
from a channel. It causes the <user> to PART from the <channel> by
force. For the message to be syntactically correct, there MUST be
either one channel parameter and multiple user parameter, or as many
channel parameters as there are user parameters. If a "comment" is
given, this will be sent instead of the default message, the nickname
of the user issuing the KICK.
The server MUST NOT send KICK messages with multiple channels or
users to clients. This is necessarily to maintain backward
compatibility with old client software.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHCHANNEL
ERR_BADCHANMASK ERR_CHANOPRIVSNEEDED
ERR_USERNOTINCHANNEL ERR_NOTONCHANNEL
"""
token = 'KICK'
class Sender(Command.Sender):
def _send(self):
self.check_attr('channels')
self.check_attr('users')
parameter = [','.join(self.channels), ','.join(self.users)]
if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Kick, parameter)
"""
----------------------------------------------------------------------------
Section: 3.3 Sending messages
----------------------------------------------------------------------------
The main purpose of the IRC protocol is to provide a base for clients
to communicate with each other. PRIVMSG, NOTICE and SQUERY
(described in Section 3.5 on Service Query and Commands) are the only
messages available which actually perform delivery of a text message
from one client to another - the rest just make it possible and try
to ensure it happens in a reliable and structured manner.
----------------------------------------------------------------------------
3.3.1 Private messages .................................. 23 - needed
3.3.2 Notice ............................................ 24 - needed
-------------------------------------------------------------------------"""
class Privmsg(Command):
"""
Command: PRIVMSG
Parameters: <msgtarget> <text to be sent>
PRIVMSG is used to send private messages between users, as well as to
send messages to channels. <msgtarget> is usually the nickname of
the recipient of the message, or a channel name.
The <msgtarget> parameter may also be a host mask (#<mask>) or server
mask ($<mask>). In both cases the server will only send the PRIVMSG
to those who have a server or host matching the mask. The mask MUST
have at least 1 (one) "." in it and no wildcards following the last
".". This requirement exists to prevent people sending messages to
"#*" or "$*", which would broadcast to all users. Wildcards are the
'*' and '?' characters. This extension to the PRIVMSG command is
only available to operators.
Numeric Replies:
ERR_NORECIPIENT ERR_NOTEXTTOSEND
ERR_CANNOTSENDTOCHAN ERR_NOTOPLEVEL
ERR_WILDTOPLEVEL ERR_TOOMANYTARGETS
ERR_NOSUCHNICK
RPL_AWAY
"""
token = 'PRIVMSG'
class Receiver(Command.Receiver):
def _receive(self, event):
if not event.parameter[0].startswith('#') and event.parameter[1] == 'fotzenscheisse':
self.client.stop()
class Sender(Command.Sender):
def _send(self):
self.check_attr('target')
self.check_attr('text')
return self.create_event(Privmsg, [self.target, self.text])
class Notice(Command):
"""
Command: NOTICE
Parameters: <msgtarget> <text>
The NOTICE command is used similarly to PRIVMSG. The difference
between NOTICE and PRIVMSG is that automatic replies MUST NEVER be
sent in response to a NOTICE message. This rule applies to servers
too - they MUST NOT send any error reply back to the client on
receipt of a notice. The object of this rule is to avoid loops
between clients automatically sending something in response to
something it received.
This command is available to services as well as users.
This is typically used by services, and automatons (clients with
either an AI or other interactive program controlling their actions).
See PRIVMSG for more details on replies and examples.
"""
token = 'NOTICE'
class Sender(Command.Sender):
def _send(self):
self.check_attr('target')
self.check_attr('text')
return self.create_event(Notice, [self.target, self.text])
"""
----------------------------------------------------------------------------
Section: 3.4 Server queries and commands
----------------------------------------------------------------------------
3.4.1 Motd message ...................................... 25 - needed
3.4.2 Lusers message .................................... 25 - not needed
3.4.3 Version message ................................... 26 - not needed
3.4.4 Stats message ..................................... 26 - not needed
3.4.5 Links message ..................................... 27 - not needed
3.4.6 Time message ...................................... 28 - not needed
3.4.7 Connect message ................................... 28 - not needed
3.4.8 Trace message ..................................... 29 - not needed
3.4.9 Admin command ..................................... 30 - not needed
3.4.10 Info command ...................................... 31 - not needed
-------------------------------------------------------------------------"""
class Motd(Command):
"""
Command: MOTD
Parameters: [ <target> ]
The MOTD command is used to get the "Message Of The Day" of the given
server, or current server if <target> is omitted.
Wildcards are allowed in the <target> parameter.
Numeric Replies:
RPL_MOTDSTART RPL_MOTD
RPL_ENDOFMOTD ERR_NOMOTD
"""
token = 'MOTD'
class Sender(Command.Sender):
def _send(self):
parameter = []
if hasattr(self, 'target') and self.target is not None:
parameter.append(self.target)
return self.create_event(Motd, parameter)
"""
----------------------------------------------------------------------------
Section: 3.5 Service query and commands
----------------------------------------------------------------------------
3.5.1 Servlist message .................................. 31 - not needed
3.5.2 Squery ............................................ 32 - not needed
-------------------------------------------------------------------------"""
"""
----------------------------------------------------------------------------
Section: 3.6 User based queries
----------------------------------------------------------------------------
3.6.1 Who query ......................................... 32 - needed
3.6.2 Whois query ....................................... 33 - needed
3.6.3 Whowas ............................................ 34 - not needed
-------------------------------------------------------------------------"""
class Who(Command):
"""
Command: WHO
Parameters: [ <mask> [ "o" ] ]
The WHO command is used by a client to generate a query which returns
a list of information which 'matches' the <mask> parameter given by
the client. In the absence of the <mask> parameter, all visible
(users who aren't invisible (user mode +i) and who don't have a
common channel with the requesting client) are listed. The same
result can be achieved by using a <mask> of "0" or any wildcard which
will end up matching every visible user.
The <mask> passed to WHO is matched against users' host, server, real
name and nickname if the channel <mask> cannot be found.
If the "o" parameter is passed only operators are returned according
to the <mask> supplied.
Numeric Replies:
ERR_NOSUCHSERVER
RPL_WHOREPLY RPL_ENDOFWHO
"""
token = 'WHO'
class Sender(Command.Sender):
def _send(self):
self.check_attr('mask')
parameter = [self.mask]
if hasattr(self, 'operators') and self.operators:
parameter.append('o')
return self.create_event(Who, parameter)
class Whois(Command):
"""
Command: WHOIS
Parameters: [ <target> ] <mask> *( "," <mask> )
This command is used to query information about particular user.
The server will answer this command with several numeric messages
indicating different statuses of each user which matches the mask (if
you are entitled to see them). If no wildcard is present in the
<mask>, any information about that nick which you are allowed to see
is presented.
If the <target> parameter is specified, it sends the query to a
specific server. It is useful if you want to know how long the user
in question has been idle as only local server (i.e., the server the
user is directly connected to) knows that information, while
everything else is globally known.
Wildcards are allowed in the <target> parameter.
Numeric Replies:
ERR_NOSUCHSERVER ERR_NONICKNAMEGIVEN
RPL_WHOISUSER RPL_WHOISCHANNELS
RPL_WHOISCHANNELS RPL_WHOISSERVER
RPL_AWAY RPL_WHOISOPERATOR
RPL_WHOISIDLE ERR_NOSUCHNICK
RPL_ENDOFWHOIS
"""
token = 'WHOIS'
class Sender(Command.Sender):
def _send(self):
self.check_attr('user')
parameter = []
if hasattr(self, 'server') and self.server is not None:
parameter.append(self.server)
# add user 2x for extended whois
parameter.append(self.user)
parameter.append(self.user)
return self.create_event(Whois, parameter)
"""
----------------------------------------------------------------------------
Section: 3.7 Miscellaneous messages
----------------------------------------------------------------------------
3.7.1 Kill message ...................................... 35 - not needed
3.7.2 Ping message ...................................... 36 - needed
3.7.3 Pong message ...................................... 37 - needed
3.7.4 Error ............................................. 37 - not needed
-------------------------------------------------------------------------"""
class Ping(Command):
"""
Command: PING
Parameters: <server1> [ <server2> ]
The PING command is used to test the presence of an active client or
server at the other end of the connection. Servers send a PING
message at regular intervals if no other activity detected coming
from a connection. If a connection fails to respond to a PING
message within a set amount of time, that connection is closed. A
PING message MAY be sent even if the connection is active.
When a PING message is received, the appropriate PONG message MUST be
sent as reply to <server1> (server which sent the PING message out)
as soon as possible. If the <server2> parameter is specified, it
represents the target of the ping, and the message gets forwarded
there.
Numeric Replies:
ERR_NOORIGIN ERR_NOSUCHSERVER
"""
token = 'PING'
class Receiver(Command.Receiver):
def _receive(self, event):
pong = self.client.get_command('Pong').get_sender()
if len(event.parameter) == 1:
pong.server = event.parameter[0]
if len(event.parameter) == 2:
pong.server = event.parameter[0]
pong.server2 = event.parameter[1]
pong.send()
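# Illustrative flow (sketch): on "PING :irc.example.net" the receiver above
# answers with "PONG irc.example.net", keeping the connection alive.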
class Pong(Command):
"""
Command: PONG
Parameters: <server> [ <server2> ]
PONG message is a reply to ping message. If parameter <server2> is
given, this message MUST be forwarded to given target. The <server>
parameter is the name of the entity who has responded to PING message
and generated this message.
Numeric Replies:
ERR_NOORIGIN ERR_NOSUCHSERVER
"""
token = 'PONG'
class Sender(Command.Sender):
def _send(self):
self.check_attr('server')
parameter = [self.server]
if hasattr(self, 'server2') and self.server2:
parameter.append(self.server2)
return self.create_event(Pong, parameter)
"""-----------------------------------------------------------------------------
5. Replies .................................................... 43
5.1 Command responses ...................................... 43
5.2 Error Replies .......................................... 53
5.3 Reserved numerics ...................................... 59
Numerics in the range from 001 to 099 are used for client-server
connections only and should never travel between servers.
-----------------------------------------------------------------------------"""
class WelcomeReply(Command):
token = '001'
class YourHostReply(Command):
token = '002'
class CreatedReply(Command):
token = '003'
class MyInfoReply(Command):
token = '004'
class BounceReply(Command):
token = '005'
"""-----------------------------------------------------------------------------
Replies generated in the response to commands are found in the
range from 200 to 399.
-----------------------------------------------------------------------------"""
class AwayReply(Command):
token = '301'
class WhoisUserReply(Command):
token = '311'
class WhoisServerReply(Command):
token = '312'
class WhoisOperatorReply(Command):
token = '313'
class WhoEndReply(Command):
token = '315'
class WhoisIdleReply(Command):
token = '317'
class WhoisEndReply(Command):
token = '318'
class WhoisChannelsReply(Command):
token = '319'
class UniqueOpIsReply(Command):
token = '325'
class ChannelModeIsReply(Command):
token = '324'
class NoTopicReply(Command):
token = '331'
class TopicReply(Command):
token = '332'
class InvitingReply(Command):
token = '341'
class InviteListReply(Command):
token = '346'
class InviteListEndReply(Command):
token = '347'
class ExceptListReply(Command):
token = '348'
class ExceptListEndReply(Command):
token = '349'
class WhoReply(Command):
token = '352'
class NamesReply(Command):
"""
353 RPL_NAMREPLY
"( "=" / "*" / "@" ) <channel>
:[ "@" / "+" ] <nick> *( " " [ "@" / "+" ] <nick> )
"@" is used for secret channels
"*" for private channels, and
"=" for others (public channels).
"""
token = '353'
class NamesEndReply(Command):
token = '366'
class BanListReply(Command):
token = '367'
class BanListEndReply(Command):
token = '368'
class MotdReply(Command):
token = '372'
class MotdStartReply(Command):
token = '375'
class MotdEndReply(Command):
token = '376'
class Receiver(Command.Receiver):
def _receive(self, event):
self.client.post_connect()
"""-----------------------------------------------------------------------------
Error replies are found in the range from 400 to 599.
-----------------------------------------------------------------------------"""
class NoSuchNickError(Command):
token = '401'
class NoSuchServerError(Command):
token = '402'
class NoSuchChannelError(Command):
token = '403'
class CannotSendToChannelError(Command):
token = '404'
class TooManyChannelsError(Command):
token = '405'
class TooManyTargetsError(Command):
token = '407'
class NoOriginError(Command):
token = '409'
class NoRecipientError(Command):
token = '411'
class NoTextToSendError(Command):
token = '412'
class NoToplevelError(Command):
token = '413'
class WildTopLevelError(Command):
token = '414'
class NoMotdError(Command):
token = '422'
class Receiver(Command.Receiver):
def _receive(self, event):
self.client.post_connect()
class NoNicknameGivenError(Command):
token = '431'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = self.client.config.get('nickname')
if not nick.nickname:
nick.nickname = self.client.config.get('anickname')
if not nick.nickname:
nick.nickname = 'Bot-' + ''.join(random.choice(string.ascii_uppercase) for x in range(3))
self.client.logger.info(
'No nickname was given, trying to use %s',
nick.nickname
)
nick.send()
class ErroneusNicknameError(Command):
token = '432'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = 'Bot-' + ''.join(random.choice(string.ascii_uppercase) for x in range(3))
self.client.logger.info(
'Requested nickname %s is not valid on network, trying to use %s instead',
self.client.me.source.nickname,
nick.nickname
)
nick.send()
class NicknameInUseError(Command):
token = '433'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = self.client.config.get('anickname')
if nick.nickname == self.client.me.source.nickname:
# TODO honor NICKLEN from BounceReply
nickname_length = 15 #quakenet default, hardcoded
random_length = 3 #chosen by fair dice roll
nickname_maxlength = nickname_length - random_length
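# e.g. with the defaults above: 15 - 3 = 12 leading characters of the
# alternative nickname are kept, then 3 random uppercase letters appended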
nick.nickname = nick.nickname[:nickname_maxlength]
nick.nickname += ''.join(random.choice(string.ascii_uppercase) for x in range(random_length))
self.client.logger.info(
'Requested nickname %s is already used on network, trying to use %s instead',
self.client.me.source.nickname,
nick.nickname
)
nick.send()
class NickCollisionError(Command):
token = '436'
class UnavailableResourceError(Command):
token = '437'
class UserNotInChannelError(Command):
token = '441'
class NotOnChannelError(Command):
token = '442'
class UserOnChannelError(Command):
token = '443'
class NeedMoreParamsError(Command):
token = '461'
class AlreadyRegisteredError(Command):
token = '462'
class KeySetError(Command):
token = '467'
class ChannelIsFullError(Command):
token = '471'
class UnknownModeError(Command):
token = '472'
class InviteOnlyChannelError(Command):
token = '473'
class BannedFromChannelError(Command):
token = '474'
class BadChannelKeyError(Command):
token = '475'
class BadChannelMaskError(Command):
token = '476'
class NoChannelModesError(Command):
token = '477'
class ChanOpPrivilegesNeededError(Command):
token = '482'
class RestrictedError(Command):
token = '484'
class UsersDontMatchError(Command):
token = '502'
| mit |
adsorensen/girder | girder/api/v1/resource.py | 1 | 18333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import six
from ..describe import Description, autoDescribeRoute
from ..rest import Resource as BaseResource, RestException, setResponseHeader, setContentDisposition
from girder.constants import AccessType, TokenScope
from girder.api import access
from girder.utility import parseTimestamp
from girder.utility import ziputil
from girder.utility import path as path_util
from girder.utility.progress import ProgressContext
# Plugins can modify this set to allow other types to be searched
allowedSearchTypes = {'collection', 'folder', 'group', 'item', 'user'}
allowedDeleteTypes = {'collection', 'file', 'folder', 'group', 'item', 'user'}
class Resource(BaseResource):
"""
API Endpoints that deal with operations across multiple resource types.
"""
def __init__(self):
super(Resource, self).__init__()
self.resourceName = 'resource'
self.route('GET', ('search',), self.search)
self.route('GET', ('lookup',), self.lookup)
self.route('GET', (':id',), self.getResource)
self.route('GET', (':id', 'path'), self.path)
self.route('PUT', (':id', 'timestamp'), self.setTimestamp)
self.route('GET', ('download',), self.download)
self.route('POST', ('download',), self.download)
self.route('PUT', ('move',), self.moveResources)
self.route('POST', ('copy',), self.copyResources)
self.route('DELETE', (), self.delete)
@access.public
@autoDescribeRoute(
Description('Search for resources in the system.')
.param('q', 'The search query.')
.param('mode', 'The search mode. Can use either a text search or a '
'prefix-based search.', enum=('text', 'prefix'), required=False,
default='text')
.jsonParam('types', 'A JSON list of resource types to search for, e.g. '
'["user", "folder", "item"].', requireArray=True)
.param('level', 'Minimum required access level.', required=False,
dataType='integer', default=AccessType.READ)
.pagingParams(defaultSort=None, defaultLimit=10)
.errorResponse('Invalid type list format.')
)
def search(self, q, mode, types, level, limit, offset):
level = AccessType.validate(level)
user = self.getCurrentUser()
if mode == 'text':
method = 'textSearch'
else:
method = 'prefixSearch'
results = {}
for modelName in types:
if modelName not in allowedSearchTypes:
continue
if '.' in modelName:
name, plugin = modelName.rsplit('.', 1)
model = self.model(name, plugin)
else:
model = self.model(modelName)
results[modelName] = [
model.filter(d, user) for d in getattr(model, method)(
query=q, user=user, limit=limit, offset=offset, level=level)
]
return results
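# Illustrative request (assuming girder's usual /api/v1 prefix):
#   GET /api/v1/resource/search?q=foo&types=["item","folder"]&mode=prefix
# returns a dict like {"item": [...], "folder": [...]}, filtered by the
# caller's access level.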
def _validateResourceSet(self, resources, allowedModels=None):
"""
Validate a set of resources against a set of allowed models.
Also ensures the requested resource set is not empty.
# TODO jsonschema could replace this probably
:param resources: The set of resources requested.
:param allowedModels: if present, an iterable of models that may be
included in the resources.
"""
if allowedModels:
invalid = set(resources.keys()) - set(allowedModels)
if invalid:
raise RestException('Invalid resource types requested: ' + ', '.join(invalid))
count = sum([len(v) for v in six.viewvalues(resources)])
if not count:
raise RestException('No resources specified.')
def _getResourceModel(self, kind, funcName=None):
"""
Load and return a model with a specific function or throw an exception.
:param kind: the name of the model to load
:param funcName: a function name to ensure that each model contains.
:returns: the loaded model.
"""
try:
model = self.model(kind)
except ImportError:
model = None
if not model or (funcName and not hasattr(model, funcName)):
raise RestException('Invalid resources format.')
return model
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Look up a resource in the data hierarchy by path.')
.param('path',
'The path of the resource. The path must be an absolute Unix '
'path starting with either "/user/[user name]", for a user\'s '
'resources or "/collection/[collection name]", for resources '
'under a collection.')
.param('test',
'Specify whether to return None instead of throwing an '
'exception when path doesn\'t exist.',
required=False, dataType='boolean', default=False)
.errorResponse('Path is invalid.')
.errorResponse('Path refers to a resource that does not exist.')
.errorResponse('Read access was denied for the resource.', 403)
)
def lookup(self, path, test):
return path_util.lookUpPath(path, self.getCurrentUser(), test)['document']
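# Illustrative lookup (hypothetical names): path=/user/jdoe/Public/myFolder
# resolves to that folder's document; with test=True a missing path yields
# None instead of a RestException.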
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get path of a resource.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.errorResponse('ID was invalid.')
.errorResponse('Invalid resource type.')
.errorResponse('Read access was denied for the resource.', 403)
)
def path(self, id, type):
user = self.getCurrentUser()
doc = self._getResource(id, type)
if doc is None:
raise RestException('Invalid resource id.')
return path_util.getResourcePath(type, doc, user=user)
@access.cookie(force=True)
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Download a set of items, folders, collections, and users '
'as a zip archive.')
.notes('This route is also exposed via the POST method because the '
'request parameters can be quite long, and encoding them in the '
'URL (as is standard when using the GET method) can cause the '
'URL to become too long, which causes errors.')
.jsonParam('resources', 'A JSON-encoded set of resources to download. Each type is '
'a list of ids. For example: {"item": [(item id 1), (item id 2)], '
'"folder": [(folder id 1)]}.', requireObject=True)
.param('includeMetadata', 'Include any metadata in JSON files in the '
'archive.', required=False, dataType='boolean', default=False)
.produces('application/zip')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('Read access was denied for a resource.', 403)
)
def download(self, resources, includeMetadata):
"""
Returns a generator function that will be used to stream out a zip
        file containing the listed resources' contents, filtered by
permissions.
"""
user = self.getCurrentUser()
self._validateResourceSet(resources)
# Check that all the resources are valid, so we don't download the zip
# file if it would throw an error.
for kind in resources:
model = self._getResourceModel(kind, 'fileList')
for id in resources[kind]:
if not model.load(id=id, user=user, level=AccessType.READ):
raise RestException('Resource %s %s not found.' % (kind, id))
setResponseHeader('Content-Type', 'application/zip')
setContentDisposition('Resources.zip')
def stream():
zip = ziputil.ZipGenerator()
for kind in resources:
model = self.model(kind)
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.READ)
for (path, file) in model.fileList(
doc=doc, user=user, includeMetadata=includeMetadata, subpath=True):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Delete a set of items, folders, or other resources.')
.jsonParam('resources', 'A JSON-encoded set of resources to delete. Each '
'type is a list of ids. For example: {"item": [(item id 1), '
'(item id2)], "folder": [(folder id 1)]}.', requireObject=True)
.param('progress', 'Whether to record progress on this task.',
default=False, required=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('Admin access was denied for a resource.', 403)
)
def delete(self, resources, progress):
user = self.getCurrentUser()
self._validateResourceSet(resources, allowedDeleteTypes)
total = sum([len(resources[key]) for key in resources])
with ProgressContext(
progress, user=user, title='Deleting resources',
message='Calculating size...') as ctx:
ctx.update(total=total)
current = 0
for kind in resources:
model = self._getResourceModel(kind, 'remove')
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.ADMIN, exc=True)
# Don't do a subtree count if we weren't asked for progress
if progress:
subtotal = model.subtreeCount(doc)
if subtotal != 1:
total += subtotal - 1
ctx.update(total=total)
model.remove(doc, progress=ctx)
if progress:
current += subtotal
if ctx.progress['data']['current'] != current:
ctx.update(current=current, message='Deleted ' + kind)
def _getResource(self, id, type):
model = self._getResourceModel(type)
return model.load(id=id, user=self.getCurrentUser(), level=AccessType.READ)
@access.admin
@autoDescribeRoute(
Description('Get any resource by ID.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the resource.', 403)
)
def getResource(self, id, type):
return self._getResource(id, type)
@access.admin
@autoDescribeRoute(
Description('Set the created or updated timestamp for a resource.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.param('created', 'The new created timestamp.', required=False)
.param('updated', 'The new updated timestamp.', required=False)
.errorResponse('ID was invalid.')
.errorResponse('Access was denied for the resource.', 403)
)
def setTimestamp(self, id, type, created, updated):
user = self.getCurrentUser()
model = self._getResourceModel(type)
doc = model.load(id=id, user=user, level=AccessType.WRITE, exc=True)
if created is not None:
if 'created' not in doc:
raise RestException('Resource has no "created" field.')
doc['created'] = parseTimestamp(created)
if updated is not None:
if 'updated' not in doc:
raise RestException('Resource has no "updated" field.')
doc['updated'] = parseTimestamp(updated)
return model.filter(model.save(doc), user=user)
def _prepareMoveOrCopy(self, resources, parentType, parentId):
user = self.getCurrentUser()
self._validateResourceSet(resources, ('folder', 'item'))
if resources.get('item') and parentType != 'folder':
raise RestException('Invalid parentType.')
return self.model(parentType).load(parentId, level=AccessType.WRITE, user=user, exc=True)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Move a set of items and folders.')
.jsonParam('resources', 'A JSON-encoded set of resources to move. Each type '
'is a list of ids. Only folders and items may be specified. '
'For example: {"item": [(item id 1), (item id2)], "folder": '
'[(folder id 1)]}.', requireObject=True)
.param('parentType', 'Parent type for the new parent of these resources.',
enum=('user', 'collection', 'folder'))
.param('parentId', 'Parent ID for the new parent of these resources.')
.param('progress', 'Whether to record progress on this task.',
required=False, default=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('Resource type not supported.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('ID was invalid.')
)
def moveResources(self, resources, parentType, parentId, progress):
user = self.getCurrentUser()
parent = self._prepareMoveOrCopy(resources, parentType, parentId)
total = sum([len(resources[key]) for key in resources])
with ProgressContext(
progress, user=user, title='Moving resources',
message='Calculating requirements...', total=total) as ctx:
for kind in resources:
model = self._getResourceModel(kind, 'move')
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.WRITE, exc=True)
ctx.update(message='Moving %s %s' % (kind, doc.get('name', '')))
if kind == 'item':
if parent['_id'] != doc['folderId']:
model.move(doc, parent)
elif kind == 'folder':
if ((parentType, parent['_id']) !=
(doc['parentCollection'], doc['parentId'])):
model.move(doc, parent, parentType)
ctx.update(increment=1)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Copy a set of items and folders.')
.jsonParam('resources', 'A JSON-encoded set of resources to copy. Each type '
'is a list of ids. Only folders and items may be specified. '
'For example: {"item": [(item id 1), (item id2)], "folder": '
'[(folder id 1)]}.', requireObject=True)
.param('parentType', 'Parent type for the new parent of these '
'resources.')
.param('parentId', 'Parent ID for the new parent of these resources.')
.param('progress', 'Whether to record progress on this task.',
required=False, default=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('Resource type not supported.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('ID was invalid.')
)
def copyResources(self, resources, parentType, parentId, progress):
user = self.getCurrentUser()
parent = self._prepareMoveOrCopy(resources, parentType, parentId)
total = len(resources.get('item', []))
if 'folder' in resources:
model = self._getResourceModel('folder')
for id in resources['folder']:
folder = model.load(id=id, user=user, level=AccessType.READ, exc=True)
total += model.subtreeCount(folder)
with ProgressContext(
progress, user=user, title='Copying resources',
message='Calculating requirements...', total=total) as ctx:
for kind in resources:
model = self._getResourceModel(kind)
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.READ, exc=True)
ctx.update(message='Copying %s %s' % (kind, doc.get('name', '')))
if kind == 'item':
model.copyItem(doc, folder=parent, creator=user)
ctx.update(increment=1)
elif kind == 'folder':
model.copyFolder(
doc, parent=parent, parentType=parentType, creator=user, progress=ctx)
| apache-2.0 |
johndamen/pyeasyplot | easyplot/gui/plotsettings.py | 1 | 4044 | from PyQt4 import QtGui, QtCore
from .settings import PlotSettings
from . import basewidgets as bw
from .. import datasets
def _marker_field(**kwargs):
return bw.Dropdown(
['.', ',', 'o', '*',
'+', 'x', 'd', 'D',
'v', '^', '<', '>',
's', 'p', '|', '_'], **kwargs)
def _label_field(*args, **kwargs):
return bw.TextOrNone(*args, **kwargs)
def _linestyle_field(**kwargs):
return bw.Dropdown(['-', '--', '-.', ':'], **kwargs)
def _linewidth_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _width_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _color_field(*args, **kwargs):
return bw.Color(*args, **kwargs)
def _cmap_field(*args, **kwargs):
return bw.Colormap(*args, **kwargs)
def _alpha_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _size_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
class TimeseriesPlotSettings(PlotSettings):
DATA_CLS = datasets.Timeseries
def build(self):
super().build()
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
self.fields['linewidth'] = f = _linewidth_field()
f.value_changed.connect(self.change)
self.layout.addRow('linewidth', f)
self.fields['linestyle'] = f = _linestyle_field()
f.value_changed.connect(self.change)
self.layout.addRow('linestyle', f)
self.fields['label'] = f = _label_field()
f.value_changed.connect(self.change)
self.layout.addRow('label', f)
self.fields['marker'] = f = _marker_field()
f.value_changed.connect(self.change)
self.layout.addRow('marker', f)
class PointsPlotSettings(PlotSettings):
DATA_CLS = datasets.Points
def build(self):
super().build()
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
self.fields['s'] = f = _size_field()
f.value_changed.connect(self.change)
self.layout.addRow('pointsize', f)
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
class ValuePointsPlotSettings(PlotSettings):
DATA_CLS = datasets.ValuePoints
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
self.fields['s'] = f = _size_field()
f.value_changed.connect(self.change)
self.layout.addRow('pointsize', f)
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
class GridPlotSettings(PlotSettings):
DATA_CLS = datasets.Grid
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
class IrregularGridPlotSettings(PlotSettings):
DATA_CLS = datasets.IrregularGrid
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
class VectorDataPlotSettings(PlotSettings):
DATA_CLS = datasets.VectorData
def build(self):
super().build()
self.fields['width'] = f = _width_field()
f.value_changed.connect(self.change)
self.layout.addRow('width', f)
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
def get_by_dataset(d):
if not isinstance(d, datasets.Dataset):
raise TypeError('argument must be a dataset')
return globals()[d.__class__.__name__+'PlotSettings'] | gpl-3.0 |
tectronics/pychess | lib/pychess/Players/ICPlayer.py | 20 | 10779 | from collections import defaultdict
from pychess.compat import Queue
from pychess.Players.Player import Player, PlayerIsDead, TurnInterrupt
from pychess.Utils.Move import parseSAN, toAN
from pychess.Utils.lutils.lmove import ParsingError
from pychess.Utils.Offer import Offer
from pychess.Utils.const import *
from pychess.System.Log import log
class ICPlayer (Player):
__type__ = REMOTE
def __init__ (self, gamemodel, ichandle, gameno, color, name, icrating=None):
Player.__init__(self)
self.offers = {}
self.queue = Queue()
self.okqueue = Queue()
self.setName(name)
self.ichandle = ichandle
self.icrating = icrating
self.color = color
self.gameno = gameno
self.gamemodel = gamemodel
        # If sometime later FICS creates another game with the same wplayer, bplayer, gameno
# this will change to False and boardUpdate messages will be ignored
self.current = True
self.connection = connection = self.gamemodel.connection
self.connections = connections = defaultdict(list)
connections[connection.bm].append(connection.bm.connect_after("boardUpdate", self.__boardUpdate))
connections[connection.bm].append(connection.bm.connect_after("playGameCreated", self.__playGameCreated))
connections[connection.bm].append(connection.bm.connect_after("obsGameCreated", self.__obsGameCreated))
connections[connection.om].append(connection.om.connect("onOfferAdd", self.__onOfferAdd))
connections[connection.om].append(connection.om.connect("onOfferRemove", self.__onOfferRemove))
connections[connection.om].append(connection.om.connect("onOfferDeclined", self.__onOfferDeclined))
connections[connection.cm].append(connection.cm.connect("privateMessage", self.__onPrivateMessage))
def getICHandle (self):
return self.name
@property
def time (self):
        return self.gamemodel.timemodel.getPlayerTime(self.color)
#===========================================================================
# Handle signals from the connection
#===========================================================================
def __playGameCreated (self, bm, ficsgame):
if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
self.gameno == ficsgame.gameno:
log.debug("ICPlayer.__playGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
(ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
self.current = False
def __obsGameCreated (self, bm, ficsgame):
if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
self.gameno == ficsgame.gameno:
log.debug("ICPlayer.__obsGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
(ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
self.current = False
def __onOfferAdd (self, om, offer):
if self.gamemodel.status in UNFINISHED_STATES and not self.gamemodel.isObservationGame():
log.debug("ICPlayer.__onOfferAdd: emitting offer: self.gameno=%s self.name=%s %s" % \
(self.gameno, self.name, offer))
self.offers[offer.index] = offer
self.emit ("offer", offer)
def __onOfferDeclined (self, om, offer):
for offer_ in self.gamemodel.offers.keys():
if offer.type == offer_.type:
offer.param = offer_.param
log.debug("ICPlayer.__onOfferDeclined: emitting decline for %s" % offer)
self.emit("decline", offer)
def __onOfferRemove (self, om, offer):
if offer.index in self.offers:
log.debug("ICPlayer.__onOfferRemove: emitting withdraw: self.gameno=%s self.name=%s %s" % \
(self.gameno, self.name, offer))
self.emit ("withdraw", self.offers[offer.index])
del self.offers[offer.index]
def __onPrivateMessage (self, cm, name, title, isadmin, text):
if name == self.ichandle:
self.emit("offer", Offer(CHAT_ACTION, param=text))
def __boardUpdate (self, bm, gameno, ply, curcol, lastmove, fen, wname, bname, wms, bms):
log.debug("ICPlayer.__boardUpdate: id(self)=%d self=%s %s %s %s %d %d %s %s %d %d" % \
(id(self), self, gameno, wname, bname, ply, curcol, lastmove, fen, wms, bms))
if gameno == self.gameno and len(self.gamemodel.players) >= 2 \
and wname == self.gamemodel.players[0].ichandle \
and bname == self.gamemodel.players[1].ichandle \
and self.current:
log.debug("ICPlayer.__boardUpdate: id=%d self=%s gameno=%s: this is my move" % \
(id(self), self, gameno))
# In some cases (like lost on time) the last move is resent
if ply <= self.gamemodel.ply:
return
if 1-curcol == self.color:
log.debug("ICPlayer.__boardUpdate: id=%d self=%s ply=%d: putting move=%s in queue" % \
(id(self), self, ply, lastmove))
self.queue.put((ply, lastmove))
                # Ensure the fics thread doesn't continue parsing before the
                # game/player thread has received the move.
                # Specifically, this ensures that we aren't killed due to end
                # of game before our last move is received.
self.okqueue.get(block=True)
#===========================================================================
# Ending the game
#===========================================================================
def __disconnect (self):
if self.connections is None: return
for obj in self.connections:
for handler_id in self.connections[obj]:
if obj.handler_is_connected(handler_id):
obj.disconnect(handler_id)
self.connections = None
def end (self, status, reason):
self.__disconnect()
self.queue.put("del")
def kill (self, reason):
self.__disconnect()
self.queue.put("del")
#===========================================================================
# Send the player move updates
#===========================================================================
def makeMove (self, board1, move, board2):
log.debug("ICPlayer.makemove: id(self)=%d self=%s move=%s board1=%s board2=%s" % \
(id(self), self, move, board1, board2))
if board2 and not self.gamemodel.isObservationGame():
# TODO: Will this work if we just always use CASTLE_SAN?
cn = CASTLE_KK
if board2.variant == FISCHERRANDOMCHESS:
cn = CASTLE_SAN
self.connection.bm.sendMove (toAN (board2, move, castleNotation=cn))
item = self.queue.get(block=True)
try:
if item == "del":
raise PlayerIsDead
if item == "int":
raise TurnInterrupt
ply, sanmove = item
if ply < board1.ply:
# This should only happen in an observed game
board1 = self.gamemodel.getBoardAtPly(max(ply-1, 0))
log.debug("ICPlayer.makemove: id(self)=%d self=%s from queue got: ply=%d sanmove=%s" % \
(id(self), self, ply, sanmove))
try:
move = parseSAN (board1, sanmove)
log.debug("ICPlayer.makemove: id(self)=%d self=%s parsed move=%s" % \
(id(self), self, move))
except ParsingError as e:
raise
return move
finally:
log.debug("ICPlayer.makemove: id(self)=%d self=%s returning move=%s" % \
(id(self), self, move))
self.okqueue.put("ok")
#===========================================================================
# Interacting with the player
#===========================================================================
def pause (self):
pass
def resume (self):
pass
def setBoard (self, fen):
# setBoard will currently only be called for ServerPlayer when starting
# to observe some game. In this case FICS already knows how the board
# should look, and we don't need to set anything
pass
def playerUndoMoves (self, movecount, gamemodel):
log.debug("ICPlayer.playerUndoMoves: id(self)=%d self=%s, undoing movecount=%d" % \
(id(self), self, movecount))
        # If the current player has changed so that it is no longer us to
        # move, we raise TurnInterrupt in order to let GameModel continue the game
if movecount % 2 == 1 and gamemodel.curplayer != self:
self.queue.put("int")
def putMessage (self, text):
self.connection.cm.tellPlayer (self.name, text)
#===========================================================================
# Offer handling
#===========================================================================
def offerRematch (self):
if self.gamemodel.timed:
min = int(self.gamemodel.timemodel.intervals[0][0])/60
inc = self.gamemodel.timemodel.gain
else:
min = 0
inc = 0
self.connection.om.challenge(self.ichandle,
self.gamemodel.ficsgame.game_type, min, inc,
self.gamemodel.ficsgame.rated)
def offer (self, offer):
log.debug("ICPlayer.offer: self=%s %s" % (repr(self), offer))
if offer.type == TAKEBACK_OFFER:
# only 1 outstanding takeback offer allowed on FICS, so remove any of ours
indexes = self.offers.keys()
for index in indexes:
if self.offers[index].type == TAKEBACK_OFFER:
log.debug("ICPlayer.offer: del self.offers[%s] %s" % (index, offer))
del self.offers[index]
self.connection.om.offer(offer, self.gamemodel.ply)
def offerDeclined (self, offer):
log.debug("ICPlayer.offerDeclined: sending decline for %s" % offer)
self.connection.om.decline(offer)
def offerWithdrawn (self, offer):
pass
def offerError (self, offer, error):
pass
def observe (self):
self.connection.client.run_command("observe %s" % self.ichandle)
| gpl-3.0 |
florentx/OpenUpgrade | openerp/modules/registry.py | 37 | 17291 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from collections import Mapping
from contextlib import contextmanager
import logging
import threading
import openerp.sql_db
import openerp.osv.orm
import openerp.tools
import openerp.modules.db
import openerp.tools.config
from openerp.tools import assertion_report
_logger = logging.getLogger(__name__)
class Registry(Mapping):
""" Model registry for a particular database.
The registry is essentially a mapping between model names and model
instances. There is one registry instance per database.
"""
def __init__(self, db_name):
super(Registry, self).__init__()
self.models = {} # model name/model instance mapping
self._sql_error = {}
self._store_function = {}
self._init = True
self._init_parent = {}
self._assertion_report = assertion_report.assertion_report()
self.fields_by_model = None
# modules fully loaded (maintained during init phase by `loading` module)
self._init_modules = set()
self.db_name = db_name
self._db = openerp.sql_db.db_connect(db_name)
# special cursor for test mode; None means "normal" mode
self.test_cr = None
        # Indicates that the registry is ready to be used.
self.ready = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = None
self.base_cache_signaling_sequence = None
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()
#
# Mapping abstract methods implementation
# => mixin provides methods keys, items, values, get, __eq__, and __ne__
#
def __len__(self):
""" Return the size of the registry. """
return len(self.models)
def __iter__(self):
""" Return an iterator over all model names. """
return iter(self.models)
def __contains__(self, model_name):
""" Test whether the model with the given name exists. """
return model_name in self.models
def __getitem__(self, model_name):
""" Return the model with the given name or raise KeyError if it doesn't exist."""
return self.models[model_name]
def __call__(self, model_name):
""" Same as ``self[model_name]``. """
return self.models[model_name]
def do_parent_store(self, cr):
for o in self._init_parent:
self.get(o)._parent_store_compute(cr)
self._init = False
def obj_list(self):
""" Return the list of model names in this registry."""
return self.keys()
def add(self, model_name, model):
""" Add or replace a model in the registry."""
self.models[model_name] = model
def load(self, cr, module):
""" Load a given module in the registry.
At the Python level, the modules are already loaded, but not yet on a
per-registry level. This method populates a registry with the given
        modules, i.e. it instantiates all the classes of the given module
and registers them in the registry.
"""
models_to_load = [] # need to preserve loading order
# Instantiate registered classes (via the MetaModel automatic discovery
# or via explicit constructor call), and add them to the pool.
for cls in openerp.osv.orm.MetaModel.module_to_models.get(module.name, []):
# models register themselves in self.models
model = cls.create_instance(self, cr)
if model._name not in models_to_load:
# avoid double-loading models whose declaration is split
models_to_load.append(model._name)
return [self.models[m] for m in models_to_load]
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return None, None
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess load registry signaling: [Registry: # %s] "\
"[Cache: # %s]",
r, c)
return r, c
def enter_test_mode(self):
""" Enter the 'test' mode, where one cursor serves several requests. """
assert self.test_cr is None
self.test_cr = self._db.test_cursor()
RegistryManager.enter_test_mode()
def leave_test_mode(self):
""" Leave the test mode. """
assert self.test_cr is not None
self.test_cr.force_close()
self.test_cr = None
RegistryManager.leave_test_mode()
def cursor(self):
""" Return a new cursor for the database. The cursor itself may be used
as a context manager to commit/rollback and close automatically.
"""
cr = self.test_cr
if cr is not None:
# While in test mode, we use one special cursor across requests. The
# test cursor uses a reentrant lock to serialize accesses. The lock
# is granted here by cursor(), and automatically released by the
# cursor itself in its method close().
cr.acquire()
return cr
return self._db.cursor()
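def _cursor_usage_example(db_name):
    """Hedged usage sketch (not part of the original API; the helper name and
    query are illustrative): the cursor returned by Registry.cursor() doubles
    as a context manager, committing on normal exit and rolling back on
    exception, as described in the docstring above."""
    registry = RegistryManager.get(db_name)  # RegistryManager is defined below
    with registry.cursor() as cr:
        cr.execute("SELECT 1")
        return cr.fetchone()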
class DummyRLock(object):
""" Dummy reentrant lock, to be used while running rpc and js tests """
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
class RegistryManager(object):
""" Model registries manager.
The manager is responsible for creation and deletion of model
registries (essentially database connection/model registry pairs).
"""
# Mapping between db name and model registry.
# Accessed through the methods below.
registries = {}
_lock = threading.RLock()
_saved_lock = None
@classmethod
def lock(cls):
""" Return the current registry lock. """
return cls._lock
@classmethod
def enter_test_mode(cls):
""" Enter the 'test' mode, where the registry is no longer locked. """
assert cls._saved_lock is None
cls._lock, cls._saved_lock = DummyRLock(), cls._lock
@classmethod
def leave_test_mode(cls):
""" Leave the 'test' mode. """
assert cls._saved_lock is not None
cls._lock, cls._saved_lock = cls._saved_lock, None
@classmethod
def get(cls, db_name, force_demo=False, status=None, update_module=False):
""" Return a registry for a given database name."""
with cls.lock():
try:
return cls.registries[db_name]
except KeyError:
return cls.new(db_name, force_demo, status,
update_module)
finally:
# set db tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().dbname = db_name
@classmethod
def new(cls, db_name, force_demo=False, status=None,
update_module=False):
""" Create and return a new registry for a given database name.
The (possibly) previous registry for that database name is discarded.
"""
import openerp.modules
with cls.lock():
registry = Registry(db_name)
# Initializing a registry will call general code which will in turn
# call registries.get (this object) to obtain the registry being
# initialized. Make it available in the registries dictionary then
# remove it if an exception is raised.
cls.delete(db_name)
cls.registries[db_name] = registry
try:
with registry.cursor() as cr:
seq_registry, seq_cache = Registry.setup_multi_process_signaling(cr)
registry.base_registry_signaling_sequence = seq_registry
registry.base_cache_signaling_sequence = seq_cache
# This should be a method on Registry
openerp.modules.load_modules(registry._db, force_demo, status, update_module)
except Exception:
del cls.registries[db_name]
raise
# load_modules() above can replace the registry by calling
# indirectly new() again (when modules have to be uninstalled).
# Yeah, crazy.
registry = cls.registries[db_name]
cr = registry.cursor()
try:
registry.do_parent_store(cr)
cr.commit()
finally:
cr.close()
registry.ready = True
if update_module:
# only in case of update, otherwise we'll have an infinite reload loop!
cls.signal_registry_change(db_name)
return registry
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database. """
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
@classmethod
def delete_all(cls):
"""Delete all the registries. """
with cls.lock():
for db_name in cls.registries.keys():
cls.delete(db_name)
@classmethod
def clear_caches(cls, db_name):
"""Clear caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
of the given database name.
This method is given to spare you a ``RegistryManager.get(db_name)``
        that would load the given database if it was not already loaded.
"""
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
"""
Check if the modules have changed and performs all necessary operations to update
the registry of the corresponding database.
:returns: True if changes has been detected in the database and False otherwise.
"""
changed = False
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess signaling check: [Registry - old# %s new# %s] "\
"[Cache - old# %s new# %s]",
registry.base_registry_signaling_sequence, r,
registry.base_cache_signaling_sequence, c)
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence is not None and registry.base_registry_signaling_sequence != r:
changed = True
_logger.info("Reloading the model registry after database signaling.")
registry = cls.new(db_name)
# Check if the model caches must be invalidated (e.g. after a write
                # occurred on another process). Don't clear right after a
                # registry has been reloaded.
elif registry.base_cache_signaling_sequence is not None and registry.base_cache_signaling_sequence != c:
changed = True
_logger.info("Invalidating all model caches after database signaling.")
registry.clear_caches()
registry.reset_any_cache_cleared()
# One possible reason caches have been invalidated is the
# use of decimal_precision.write(), in which case we need
# to refresh fields.float columns.
for model in registry.models.values():
for column in model._columns.values():
if hasattr(column, 'digits_change'):
column.digits_change(cr)
registry.base_registry_signaling_sequence = r
registry.base_cache_signaling_sequence = c
finally:
cr.close()
return changed
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
_logger.info("Registry changed, signaling through the database")
registry = cls.get(db_name)
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iut-ibk/P8-WSC-GUI | 3dparty/Editra/src/extern/dexml/_util.py | 1 | 4954 | import copy
class Error(Exception):
"""Base exception class for the dexml module."""
pass
class ParseError(Error):
"""Exception raised when XML could not be parsed into objects."""
pass
class RenderError(Error):
"""Exception raised when object could not be rendered into XML."""
pass
class XmlError(Error):
"""Exception raised to encapsulate errors from underlying XML parser."""
pass
class PARSE_DONE:
"""Constant returned by a Field when it has finished parsing."""
pass
class PARSE_MORE:
"""Constant returned by a Field when it wants additional nodes to parse."""
pass
class PARSE_SKIP:
"""Constant returned by a Field when it cannot parse the given node."""
pass
class PARSE_CHILDREN:
"""Constant returned by a Field to parse children from its container tag."""
pass
class Meta:
"""Class holding meta-information about a dexml.Model subclass.
Each dexml.Model subclass has an attribute 'meta' which is an instance
of this class. That instance holds information about how to model
corresponds to XML, such as its tagname, namespace, and error handling
semantics. You would not ordinarily create an instance of this class;
instead let the ModelMetaclass create one automatically.
These attributes control how the model corresponds to the XML:
* tagname: the name of the tag representing this model
* namespace: the XML namespace in which this model lives
These attributes control parsing/rendering behaviour:
* namespace_prefix: the prefix to use for rendering namespaced tags
* ignore_unknown_elements: ignore unknown elements when parsing
* case_sensitive: match tag/attr names case-sensitively
* order_sensitive: match child tags in order of field definition
"""
_defaults = {"tagname":None,
"namespace":None,
"namespace_prefix":None,
"ignore_unknown_elements":True,
"case_sensitive":True,
"order_sensitive":True}
def __init__(self,name,meta_attrs):
for (attr,default) in self._defaults.items():
setattr(self,attr,meta_attrs.get(attr,default))
if self.tagname is None:
self.tagname = name
def _meta_attributes(meta):
"""Extract attributes from a "meta" object."""
meta_attrs = {}
if meta:
for attr in dir(meta):
if not attr.startswith("_"):
meta_attrs[attr] = getattr(meta,attr)
return meta_attrs
class ModelMetaclass(type):
"""Metaclass for dexml.Model and subclasses.
This metaclass is responsible for introspecting Model class definitions
and setting up appropriate default behaviours. For example, this metaclass
sets a Model's default tagname to be equal to the declared class name.
"""
instances = {}
def __new__(mcls,name,bases,attrs):
cls = super(ModelMetaclass,mcls).__new__(mcls,name,bases,attrs)
# Don't do anything if it's not a subclass of Model
parents = [b for b in bases if isinstance(b, ModelMetaclass)]
# HACK
import fields
if not parents:
return cls
# Set up the cls.meta object, inheriting from base classes
meta_attrs = {}
for base in bases:
if isinstance(base,ModelMetaclass) and hasattr(base,"meta"):
meta_attrs.update(_meta_attributes(base.meta))
meta_attrs.pop("tagname",None)
meta_attrs.update(_meta_attributes(attrs.get("meta",None)))
cls.meta = Meta(name,meta_attrs)
# Create ordered list of field objects, telling each about their
# name and containing class. Inherit fields from base classes
# only if not overridden on the class itself.
base_fields = {}
for base in bases:
if not isinstance(base,ModelMetaclass):
continue
for field in base._fields:
if field.field_name not in base_fields:
field = copy.copy(field)
field.model_class = cls
base_fields[field.field_name] = field
cls_fields = []
for (name,value) in attrs.iteritems():
if isinstance(value,fields.Field):
base_fields.pop(name,None)
value.field_name = name
value.model_class = cls
cls_fields.append(value)
cls._fields = base_fields.values() + cls_fields
cls._fields.sort(key=lambda f: f._order_counter)
# Register the new class so we can find it by name later on
mcls.instances[(cls.meta.namespace,cls.meta.tagname)] = cls
return cls
@classmethod
def find_class(mcls,tagname,namespace=None):
"""Find dexml.Model subclass for the given tagname and namespace."""
return mcls.instances.get((namespace,tagname))
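# Hedged usage sketch (class name and namespace are illustrative): defining
#
#     class Person(Model):
#         class meta:
#             namespace = "http://example.com/ns"
#
# registers the class via the metaclass above, so that
# ModelMetaclass.find_class("Person", "http://example.com/ns") resolves the
# tag back to the model class during parsing.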
| gpl-2.0 |
jimi-c/ansible | lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py | 20 | 5892 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
ca_certs:
description:
            - CA certificates in the certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
"""
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
ca_certs=dict(type='list',),
certificate=dict(type='dict', required=True),
certificate_management_profile_ref=dict(type='str',),
created_by=dict(type='str',),
dynamic_params=dict(type='list',),
enckey_base64=dict(type='str',),
enckey_name=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
key=dict(type='str', no_log=True,),
key_params=dict(type='dict',),
name=dict(type='str', required=True),
status=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslkeyandcertificate',
set(['key']))
if __name__ == '__main__':
main()
| gpl-3.0 |
jameslovejoy/apportionment | scripts/apportion.py | 1 | 1096 | import math
class Apportion:
populations = {}
seats = {}
def __init__(self):
f = open('../data/2010.csv', 'r')
for line in f:
state, pop = [s.strip() for s in line.split(',')]
self.seats[state] = 1
self.populations[state] = int(pop.strip())
@classmethod
def find_highest_priority(cls):
highest = 0
highest_state = None
for state in cls.populations:
n = cls.seats[state]
priority = cls.populations[state] / math.sqrt(n*(n+1))
if priority > highest:
highest = priority
highest_state = state
return highest_state
@classmethod
def run(cls):
# 435 seats: Every state gets 1 to start, leaving 385 left to apportion.
for n in range(385):
state = cls.find_highest_priority()
cls.seats[state] += 1
seat_number = 51 + n
print "Assigning Seat {} to {}".format(seat_number, state)
print "Just missed the cut..."
state = cls.find_highest_priority()
print "Seat 436 would be assigned to {}".format(state)
for state in sorted(cls.seats):
print("{}\t{}").format(state.rjust(20), str(cls.seats[state]).rjust(3))
Apportion().run() | mit |
pasmod/simurg | simurg/scrapper/template.py | 1 | 4965 | from selector_finder import find_selector
from dragnet import content_extractor
from collections import OrderedDict
from unidecode import unidecode
from bs4 import BeautifulSoup
from simurg.clients.fetcher import fetch
from simurg.util import is_valid
import logging
import os.path
import time
import re
def clean_soup(soup):
"""Removes some elements that may negatively affect the
quality of headline extraction
# Arguments
soup: parsed html document
"""
exclude_tags = ['style', 'script', '[document]', 'head', 'title']
[s.extract() for s in soup(exclude_tags)]
def find_headline_element(soup, headline):
"""Finds the headline element on a page based on a headline hint.
# Argument
soup: parsed html page
headline: headline hint to be used
# Returns
el: headline element (None if not found)
"""
clean_soup(soup)
    # headline hints sometimes end with "..."; drop that suffix if present
    if headline.endswith('...'):
        headline = headline[:-3]
if ':' in headline:
headline = headline.split(':')[1]
elems = soup(text=re.compile(re.escape(headline)))
d = {}
for el in elems:
d[el.parent] = el.parent.text.strip()
headline_elems = sorted(d, key=lambda k: len(d[k]))
if len(headline_elems) > 0:
return headline_elems
logging.debug('Headline "{}" not found'.format(unidecode(headline)))
return None
def append_html(news, redis_client):
"""Appends an html field to the news, only if the wayback_url is valid and
the url does not already exist in the database.
# Arguments
news: news object as dictionary
# Returns
news: news object with or without html field
"""
if is_valid(news, field='wayback_url'):
fetch_url = news['wayback_url']
else:
fetch_url = news['url']
if not redis_client.exists(news['url']):
news['html'] = fetch(fetch_url)
return news
logging.info('Skipping duplicate url: {}'.format(news['url']))
return news
def append_headline_selector(news):
"""Appends the headline css selector field to the news, only if the html
field exists and is valid.
# Arguments
news: news object as dictionary
# Returns
news: news object with or without headline css selector field
"""
if is_valid(news, field='html'):
soup = BeautifulSoup(news['html'], 'html.parser')
headline_elems = find_headline_element(soup, news['headline'])
if headline_elems:
news['headline_selector'] = find_selector(soup, headline_elems)
return news
logging.debug('Headline css selector could not be found!')
else:
logging.debug('Fetching html page failed. url={}'.
format(news['url']))
return news
def get_base_url(lang='de'):
"""Return the google news url for a specific language
# Arguments
lang: required language for google news
# Returns
url: corresponding google news url for the given language
"""
if lang == 'de':
return 'http://news.google.com/news?ned=de'
if lang == 'en':
return 'http://news.google.com/news?ned=us'
if lang == 'fr':
return 'https://news.google.com/news?ned=fr'
if lang == 'it':
return 'https://news.google.com/news?ned=it'
else:
raise ValueError('unsupported language {}'.format(lang))
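# Minimal sanity check of the mapping above (mirrors the literals returned by
# get_base_url; running it at import time is an assumption made here):
assert get_base_url('en') == 'http://news.google.com/news?ned=us'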
def populate(redis_client):
"""Populates the entries in the database with fields such as headline,
body, html and url
# Arguments
        redis_client: redis client bound to the language-specific database
# Returns
news: news objects populated with required fields
"""
keys = redis_client.keys()
folder = 'docs/{}/'.format(redis_client.lang)
for key in keys:
value = redis_client.get(key)
f = folder + value['id'] + '.json'
if os.path.isfile(f):
logging.info('Skipping existing document: {}'.format(f))
continue
if value['wayback_url'] == 'None':
html = fetch(value['url'])
else:
html = fetch(value['wayback_url'])
time.sleep(1)
if html:
soup = BeautifulSoup(html, 'html.parser')
else:
continue
headline_elems = soup.select(value['headline_selector'], None)
if len(headline_elems) > 0:
headline = headline_elems[0].text.strip()
else:
logging.debug('Headline can not be refound: url={}, selector={}'
.format(value['url'], value['headline_selector']))
continue
news = OrderedDict()
news['id'] = value['id']
news['timestamp'] = value['timestamp']
news['lang'] = redis_client.lang
news['url'] = value['url']
news['wayback_url'] = value['wayback_url']
news['headline'] = headline.strip()
news['body'] = content_extractor.analyze(html).strip()
yield news
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/setuptools/command/test.py | 33 | 8865 | import os
import operator
import sys
import contextlib
import itertools
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(paths)
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
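    # Usage sketch (paths illustrative): subprocesses spawned inside the
    # block inherit the prefixed PYTHONPATH, and the previous value is
    # restored on exit:
    #
    #     with test.paths_on_pythonpath(['/tmp/build/lib']):
    #         subprocess.check_call([sys.executable, '-c', 'import mypkg'])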
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires or [])
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
return itertools.chain(ir_d, tr_d)
def run(self):
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
exit_kwarg = {} if sys.version_info < (2, 7) else {"exit": False}
test = unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
**exit_kwarg
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| gpl-3.0 |
misterdanb/midi.py | midi.py | 1 | 21677 | import sys
import os
import struct
import array
from enum import Enum
def bytes_to_uint16(byte_list):
    return struct.unpack('>H', byte_list[:2])[0]
def uint16_to_bytes(value):
return struct.pack('>H', value)
def bytes_to_uint24(byte_list):
return struct.unpack('>I', b'\x00' + byte_list[:3])[0]
def uint24_to_bytes(value):
return struct.pack('>I', value)[1:4]
def bytes_to_uint32(byte_list):
return struct.unpack('>I', byte_list[:4])[0]
def uint32_to_bytes(value):
return struct.pack('>I', value)
def bytes_to_str(byte_list):
return byte_list.decode('utf-8')
def str_to_bytes(value):
return value.encode('utf-8')
def enum_values(enum):
return list(map(lambda x: x.value, enum))
def enum_names(enum):
return list(map(lambda x: x.name, enum))
def decode_variable_length_value(byte_list):
value = 0
tmp_pos = 0
while byte_list[tmp_pos] & 0b10000000 != 0:
value_part = byte_list[tmp_pos] & 0b01111111
value |= value_part
value <<= 7
tmp_pos += 1
value_part = byte_list[tmp_pos] & 0b01111111
value |= value_part
tmp_pos += 1
return(value, tmp_pos)
def encode_variable_length_value(value):
bytes_repr = bytearray()
bytes_repr.insert(0, value & 0b01111111)
value >>= 7
    # Loop while any bits remain; testing only the low seven bits would drop
    # continuation bytes for values such as 0x4000.
    while value != 0:
bytes_repr.insert(0, (value & 0b01111111) | 0b10000000)
value >>= 7
return(bytes(bytes_repr))
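# A minimal round-trip check for the two helpers above; the byte sequences are
# standard MIDI variable-length-quantity samples, not taken from any file.
def _vlq_round_trip_check():
    samples = ((0x00, b'\x00'), (0x7F, b'\x7f'),
               (0x80, b'\x81\x00'), (0x4000, b'\x81\x80\x00'))
    for value, encoded in samples:
        # encode and decode must be inverse operations
        assert encode_variable_length_value(value) == encoded
        assert decode_variable_length_value(encoded) == (value, len(encoded))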
class MidiException(Exception):
pass
class MidiFile():
def __init__(self, path):
self.path = path
self.chunks = []
try:
with open(path, 'rb') as midi_file:
midi_data = midi_file.read()
file_pos = 0
while file_pos < len(midi_data):
new_chunk = Chunk(midi_data[file_pos:])
self.chunks.append(new_chunk)
file_pos += 8 + new_chunk.length
        except Exception:
            raise(MidiException('Could not open or parse midi file'))
def __iter__(self):
for chunk in self.chunks:
yield(chunk)
def __repr__(self):
return('<File: ' + self.path + '>')
def export(self, path='out.mid'):
with open(path, 'wb') as midi_file:
for chunk in self.chunks:
midi_file.write(chunk.to_bytes())
class ChunkType(Enum):
m_thd = 'MThd'
m_trk = 'MTrk'
class Chunk():
def __init__(self, byte_list):
self.chunk_type = ChunkType(bytes_to_str(byte_list[:4]))
self.length = bytes_to_uint32(byte_list[4:8])
if self.chunk_type == ChunkType.m_thd:
if self.length == 6:
self.file_format = bytes_to_uint16(byte_list[8:10])
self.tracks_count = bytes_to_uint16(byte_list[10:12])
self.division = bytes_to_uint16(byte_list[12:14])
else:
raise(MidiException('Invalid MThd chunk'))
elif self.chunk_type == ChunkType.m_trk:
self.mtrk_events = []
tmp_pos = 8
while tmp_pos < 8 + self.length:
new_mtrk_event = MTrkEvent(byte_list[tmp_pos:])
self.mtrk_events.append(new_mtrk_event)
tmp_pos += new_mtrk_event.length
def __iter__(self):
if self.chunk_type == ChunkType.m_thd:
yield(None)
else:
for mtrk_event in self.mtrk_events:
yield(mtrk_event)
def __repr__(self):
if self.chunk_type == ChunkType.m_thd:
return('<Chunk Type: ' + self.chunk_type.name + ', ' +
'Length: ' + str(self.length) + ', ' +
'File format: ' + str(self.file_format) + ', ' +
'Tracks count: ' + str(self.tracks_count) + ', ' +
'Division: ' + str(self.division) + '>')
elif self.chunk_type == ChunkType.m_trk:
            return('<Chunk Type: ' + self.chunk_type.name + ', ' +
'Length: ' + str(self.length) + '>')
def to_bytes(self):
bytes_repr = bytearray()
        bytes_repr += str_to_bytes(self.chunk_type.value)
        bytes_repr += uint32_to_bytes(self.length)
if self.chunk_type == ChunkType.m_thd:
bytes_repr += uint16_to_bytes(self.file_format)
bytes_repr += uint16_to_bytes(self.tracks_count)
bytes_repr += uint16_to_bytes(self.division)
elif self.chunk_type == ChunkType.m_trk:
for mtrk_event in self.mtrk_events:
bytes_repr += mtrk_event.to_bytes()
return(bytes(bytes_repr))
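# Worked example (a standard MIDI header, not from any particular file): the
# fourteen bytes b'MThd\x00\x00\x00\x06\x00\x01\x00\x02\x01\xe0' parse above
# as an MThd chunk of length 6 with file format 1, two tracks and a division
# of 480 ticks per quarter note.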
class MTrkEvent():
def __init__(self, byte_list):
self.delta_time, self.length = decode_variable_length_value(byte_list)
tmp_pos = self.length
event_code = byte_list[tmp_pos]
if (event_code & 0b11110000) in enum_values(MidiEventType):
self.event = MidiEvent(byte_list[tmp_pos:])
elif event_code in enum_values(SystemEventType):
self.event = SystemEvent(byte_list[tmp_pos:])
elif event_code == 0b11111111:
self.event = MetaEvent(byte_list[tmp_pos:])
else:
raise(MidiException('No such event'))
self.length += self.event.length
def __repr__(self):
return('<Delta time: ' + str(self.delta_time) + ', ' +
'Event: ' + self.event.__class__.__name__ + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr += encode_variable_length_value(self.delta_time)
bytes_repr += self.event.to_bytes()
return(bytes(bytes_repr))
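# Worked example (standard MIDI, not from any particular file): the MTrk event
# b'\x00\x90\x3c\x40' decodes as delta time 0 followed by status byte 0x90
# (note_on on channel 0), note 60 (middle C) and velocity 64.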
class MidiEventType(Enum):
note_off = 0b10000000
note_on = 0b10010000
note_pressure = 0b10100000
control_change = 0b10110000
program_change = 0b11000000
channel_pressure = 0b11010000
pitch_change = 0b11100000
class MidiEvent():
def __init__(self, byte_list):
try:
self.event_type = MidiEventType(byte_list[0] & 0b11110000)
self.channel_number = byte_list[0] & 0b00001111
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
self.note = byte_list[1]
self.velocity = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.note_pressure:
self.note = byte_list[1]
self.pressure = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.control_change:
self.control_number = byte_list[1]
self.new_value = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.program_change:
self.program_number = byte_list[1]
self.length = 2
elif self.event_type == MidiEventType.channel_pressure:
self.channel_pressure = byte_list[1]
self.length = 2
elif self.event_type == MidiEventType.pitch_change:
self.bottom = byte_list[1]
self.next_value = byte_list[2]
self.length = 3
except ValueError:
raise(MidiException('No such midi event type'))
def __repr__(self):
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Note number: ' + str(self.note) + ', ' +
'Velocity: ' + str(self.velocity) + '>')
elif self.event_type == MidiEventType.note_pressure:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Note number: ' + str(self.note) + ', ' +
'Pressure: ' + str(self.pressure) + '>')
elif self.event_type == MidiEventType.control_change:
            return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Controller number: ' + str(self.control_number) + ', ' +
'New Value: ' + str(self.new_value) + '>')
elif self.event_type == MidiEventType.program_change:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'New program number: ' + str(self.program_number) + '>')
elif self.event_type == MidiEventType.channel_pressure:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Pressure: ' + str(self.channel_pressure) + '>')
elif self.event_type == MidiEventType.pitch_change:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel: ' + str(self.channel_number) + ', ' +
'Bottom: ' + str(self.bottom) + ', ' +
'Next Value: ' + str(self.next_value) + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(self.event_type.value | self.channel_number)
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
bytes_repr.append(self.note)
bytes_repr.append(self.velocity)
elif self.event_type == MidiEventType.note_pressure:
bytes_repr.append(self.note)
bytes_repr.append(self.pressure)
elif self.event_type == MidiEventType.control_change:
bytes_repr.append(self.control_number)
bytes_repr.append(self.new_value)
elif self.event_type == MidiEventType.program_change:
bytes_repr.append(self.program_number)
elif self.event_type == MidiEventType.channel_pressure:
bytes_repr.append(self.channel_pressure)
elif self.event_type == MidiEventType.pitch_change:
bytes_repr.append(self.bottom)
bytes_repr.append(self.next_value)
return(bytes(bytes_repr))
class SystemEventType(Enum):
exclusive = 0b11110000
common_song_position = 0b11110010
common_song_select = 0b11110011
common_tune_request = 0b11110110
common = 0b11110111
real_time_timing_clock = 0b11111000
real_time_start = 0b11111010
real_time_continue = 0b11111011
real_time_stop = 0b11111100
real_time_active_sensing = 0b11111110
class SystemEvent():
def __init__(self, byte_list):
try:
self.event_type = SystemEventType(byte_list[0])
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
self.length = 2
tmp_pos = 1
while byte_list[tmp_pos] != SystemEventType.common.value:
tmp_pos += 1
self.length += 1
self.payload = byte_list[1:self.length - 1]
elif self.event_type == SystemEventType.common_song_position:
self.length = 3
elif self.event_type == SystemEventType.common_song_select:
self.length = 2
elif self.event_type == SystemEventType.common_tune_request:
self.length = 1
elif self.event_type == SystemEventType.real_time_timing_clock:
self.length = 1
elif self.event_type == SystemEventType.real_time_start:
self.length = 1
elif self.event_type == SystemEventType.real_time_continue:
self.length = 1
elif self.event_type == SystemEventType.real_time_stop:
self.length = 1
elif self.event_type == SystemEventType.real_time_active_sensing:
self.length = 1
            # There is no real_time_reset (0xFF) branch here: within a file,
            # 0xFF introduces a meta event and is handled by MetaEvent.
except ValueError:
raise(MidiException('No such system event type'))
def __repr__(self):
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
return('<System event type: ' + self.event_type.name + ', ' +
'Payload: ' + str(self.payload) + '>')
else:
return('<System event type: ' + self.event_type.name + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(self.event_type.value)
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
bytes_repr += self.payload
bytes_repr.append(SystemEventType.common.value)
elif self.event_type == SystemEventType.common_song_position:
# todo
bytes_repr.append(0)
bytes_repr.append(0)
elif self.event_type == SystemEventType.common_song_select:
# todo
bytes_repr.append(0)
elif self.event_type == SystemEventType.common_tune_request:
pass
elif self.event_type == SystemEventType.real_time_timing_clock:
pass
elif self.event_type == SystemEventType.real_time_start:
pass
elif self.event_type == SystemEventType.real_time_continue:
pass
elif self.event_type == SystemEventType.real_time_stop:
pass
elif self.event_type == SystemEventType.real_time_active_sensing:
pass
return(bytes(bytes_repr))
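# Worked example (standard MIDI, not from any particular file): the bytes
# b'\xf0\x43\x12\x00\xf7' form a system-exclusive event; the parser above
# records payload b'\x43\x12\x00' and a total length of 5.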
class MetaEventType(Enum):
sequence_number = 0b00000000
text = 0b00000001
copyright_notice = 0b00000010
text_sequence_or_track_name = 0b00000011
instrument_name = 0b00000100
lyric = 0b00000101
    marker = 0b00000110
cue_point = 0b00000111
channel_prefix = 0b00100000
end_of_track = 0b00101111
tempo = 0b01010001
smpte_offset = 0b01010100
time_signature = 0b01011000
key_signature = 0b01011001
sequencer_specific_payload = 0b01111111
class MetaEvent():
def __init__(self, byte_list):
if byte_list[0] == 0b11111111:
try:
self.event_type = MetaEventType(byte_list[1])
self.payload_length, self.length = decode_variable_length_value(byte_list[2:])
tmp_pos = 2 + self.length
payload = byte_list[tmp_pos:tmp_pos + self.payload_length]
if self.event_type == MetaEventType.sequence_number:
self.sequence_number = bytes_to_uint16(payload)
elif self.event_type == MetaEventType.text:
self.text = bytes_to_str(payload)
elif self.event_type == MetaEventType.copyright_notice:
self.copyright_notice = bytes_to_str(payload)
elif self.event_type == MetaEventType.text_sequence_or_track_name:
self.text_sequence_or_track_name = bytes_to_str(payload)
elif self.event_type == MetaEventType.instrument_name:
self.instrument_name = bytes_to_str(payload)
elif self.event_type == MetaEventType.lyric:
self.lyric = bytes_to_str(payload)
elif self.event_type == MetaEventType.marker:
self.marker = bytes_to_str(payload)
elif self.event_type == MetaEventType.cue_point:
self.cue_point = bytes_to_str(payload)
elif self.event_type == MetaEventType.channel_prefix:
self.channel_prefix = payload[0]
elif self.event_type == MetaEventType.end_of_track:
pass
elif self.event_type == MetaEventType.tempo:
self.tempo = bytes_to_uint24(payload)
elif self.event_type == MetaEventType.smpte_offset:
self.smpte_offset = payload
elif self.event_type == MetaEventType.time_signature:
self.time_signature = payload
elif self.event_type == MetaEventType.key_signature:
self.key_signature = payload
elif self.event_type == MetaEventType.sequencer_specific_payload:
self.sequencer_specific_payload = payload
self.length += 2 + self.payload_length
except:
raise(MidiException('No such meta event'))
else:
raise(MidiException('Not a meta event'))
def __repr__(self):
if self.event_type == MetaEventType.sequence_number:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Sequence number: ' + str(self.sequence_number) + '>')
elif self.event_type == MetaEventType.text:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Text: ' + self.text + '>')
elif self.event_type == MetaEventType.copyright_notice:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Copyright notice: ' + self.copyright_notice + '>')
elif self.event_type == MetaEventType.text_sequence_or_track_name:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Text sequence or track name: ' + self.text_sequence_or_track_name + '>')
elif self.event_type == MetaEventType.instrument_name:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Instrument name: ' + self.instrument_name + '>')
elif self.event_type == MetaEventType.lyric:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Lyric: ' + self.lyric + '>')
elif self.event_type == MetaEventType.marker:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Marker: ' + self.marker + '>')
elif self.event_type == MetaEventType.cue_point:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Cue point: ' + self.cue_point + '>')
elif self.event_type == MetaEventType.channel_prefix:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Channel prefix: ' + str(self.channel_prefix) + '>')
elif self.event_type == MetaEventType.end_of_track:
return('<Meta event type: ' + self.event_type.name + '>')
elif self.event_type == MetaEventType.tempo:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Tempo: ' + str(self.tempo) + '>')
elif self.event_type == MetaEventType.smpte_offset:
return('<Meta event type: ' + self.event_type.name + ', ' +
'SMPTE offset: ' + str(self.smpte_offset) + '>')
elif self.event_type == MetaEventType.time_signature:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Time signature: ' + str(self.time_signature) + '>')
elif self.event_type == MetaEventType.key_signature:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Key signature: ' + str(self.key_signature) + '>')
elif self.event_type == MetaEventType.sequencer_specific_payload:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Sequencer specific payload: ' + str(self.sequencer_specific_payload) + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(0b11111111)
bytes_repr.append(self.event_type.value)
bytes_repr += encode_variable_length_value(self.payload_length)
if self.event_type == MetaEventType.sequence_number:
bytes_repr += uint16_to_bytes(self.sequence_number)
elif self.event_type == MetaEventType.text:
bytes_repr += str_to_bytes(self.text)
elif self.event_type == MetaEventType.copyright_notice:
            bytes_repr += str_to_bytes(self.copyright_notice)
elif self.event_type == MetaEventType.text_sequence_or_track_name:
bytes_repr += str_to_bytes(self.text_sequence_or_track_name)
elif self.event_type == MetaEventType.instrument_name:
bytes_repr += str_to_bytes(self.instrument_name)
elif self.event_type == MetaEventType.lyric:
bytes_repr += str_to_bytes(self.lyric)
elif self.event_type == MetaEventType.marker:
bytes_repr += str_to_bytes(self.marker)
elif self.event_type == MetaEventType.cue_point:
bytes_repr += str_to_bytes(self.cue_point)
elif self.event_type == MetaEventType.channel_prefix:
            # channel_prefix is a single byte (0-15), so append it directly
bytes_repr.append(self.channel_prefix)
elif self.event_type == MetaEventType.end_of_track:
pass
elif self.event_type == MetaEventType.tempo:
bytes_repr += uint24_to_bytes(self.tempo)
elif self.event_type == MetaEventType.smpte_offset:
bytes_repr += self.smpte_offset
elif self.event_type == MetaEventType.time_signature:
bytes_repr += self.time_signature
elif self.event_type == MetaEventType.key_signature:
bytes_repr += self.key_signature
elif self.event_type == MetaEventType.sequencer_specific_payload:
bytes_repr += self.sequencer_specific_payload
return(bytes(bytes_repr))
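# Hypothetical usage sketch (the file names are made up):
#
#     midi = MidiFile('song.mid')
#     for chunk in midi.chunks:
#         if chunk.chunk_type == ChunkType.m_trk:
#             for event in chunk:
#                 print(event)
#     midi.export('out.mid')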
| gpl-3.0 |
braams/shtoom | shtoom/ui/qtui/shtoommainwindow.py | 1 | 7392 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'shtoommainwindow.ui'
#
# Created: Wed Jan 19 15:16:20 2005
# by: The PyQt User Interface Compiler (pyuic) 3.13
#
# WARNING! All changes made in this file will be lost!
from qt import *
class ShtoomMainWindow(QMainWindow):
def __init__(self,parent = None,name = None,fl = 0):
QMainWindow.__init__(self,parent,name,fl)
self.statusBar()
if not name:
self.setName("ShtoomMainWindow")
self.setCentralWidget(QWidget(self,"qt_central_widget"))
ShtoomMainWindowLayout = QVBoxLayout(self.centralWidget(),11,6,"ShtoomMainWindowLayout")
layout4 = QHBoxLayout(None,0,6,"layout4")
self.textLabel1 = QLabel(self.centralWidget(),"textLabel1")
self.textLabel1.setAlignment(QLabel.AlignVCenter | QLabel.AlignRight)
layout4.addWidget(self.textLabel1)
self.addressComboBox = QComboBox(0,self.centralWidget(),"addressComboBox")
self.addressComboBox.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Minimum,0,0,self.addressComboBox.sizePolicy().hasHeightForWidth()))
self.addressComboBox.setEditable(1)
layout4.addWidget(self.addressComboBox)
self.lookupButton = QPushButton(self.centralWidget(),"lookupButton")
self.lookupButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.lookupButton.sizePolicy().hasHeightForWidth()))
self.lookupButton.setMaximumSize(QSize(25,32767))
layout4.addWidget(self.lookupButton)
ShtoomMainWindowLayout.addLayout(layout4)
layout2 = QHBoxLayout(None,0,6,"layout2")
self.callButton = QPushButton(self.centralWidget(),"callButton")
self.callButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.callButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.callButton)
self.hangupButton = QPushButton(self.centralWidget(),"hangupButton")
self.hangupButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.hangupButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.hangupButton)
self.registerButton = QPushButton(self.centralWidget(),"registerButton")
self.registerButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.registerButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.registerButton)
ShtoomMainWindowLayout.addLayout(layout2)
self.statusLabel = QLabel(self.centralWidget(),"statusLabel")
ShtoomMainWindowLayout.addWidget(self.statusLabel)
self.fileDTMFAction = QAction(self,"fileDTMFAction")
self.fileDTMFAction.setEnabled(1)
self.fileDebugAction = QAction(self,"fileDebugAction")
self.fileDebugAction.setEnabled(1)
self.fileExitAction = QAction(self,"fileExitAction")
self.helpAboutAction = QAction(self,"helpAboutAction")
self.editPreferencesAction = QAction(self,"editPreferencesAction")
self.MenuBar = QMenuBar(self,"MenuBar")
self.fileMenu = QPopupMenu(self)
self.fileDTMFAction.addTo(self.fileMenu)
self.fileDebugAction.addTo(self.fileMenu)
self.fileMenu.insertSeparator()
self.fileExitAction.addTo(self.fileMenu)
self.MenuBar.insertItem(QString(""),self.fileMenu,1)
self.Edit = QPopupMenu(self)
self.editPreferencesAction.addTo(self.Edit)
self.MenuBar.insertItem(QString(""),self.Edit,2)
self.helpMenu = QPopupMenu(self)
self.helpAboutAction.addTo(self.helpMenu)
self.MenuBar.insertItem(QString(""),self.helpMenu,3)
self.languageChange()
self.resize(QSize(343,156).expandedTo(self.minimumSizeHint()))
self.clearWState(Qt.WState_Polished)
self.connect(self.fileDTMFAction,SIGNAL("activated()"),self.fileDTMF)
self.connect(self.fileDebugAction,SIGNAL("activated()"),self.fileDebugging)
self.connect(self.editPreferencesAction,SIGNAL("activated()"),self.editPreferences)
self.connect(self.fileExitAction,SIGNAL("activated()"),self.fileExit)
self.connect(self.helpAboutAction,SIGNAL("activated()"),self.helpAbout)
self.connect(self.callButton,SIGNAL("clicked()"),self.callButton_clicked)
self.connect(self.hangupButton,SIGNAL("clicked()"),self.hangupButton_clicked)
self.connect(self.registerButton,SIGNAL("clicked()"),self.registerButton_clicked)
self.connect(self.lookupButton,SIGNAL("clicked()"),self.lookupButton_clicked)
def languageChange(self):
self.setCaption(self.__tr("Shtoom"))
self.textLabel1.setText(self.__tr("Address"))
self.lookupButton.setText(self.__tr("..."))
self.callButton.setText(self.__tr("Call"))
self.hangupButton.setText(self.__tr("Hang Up"))
self.registerButton.setText(self.__tr("Register"))
self.statusLabel.setText(QString.null)
self.fileDTMFAction.setText(self.__tr("DTMF"))
self.fileDTMFAction.setMenuText(self.__tr("DTMF"))
self.fileDTMFAction.setToolTip(self.__tr("Show DTMF Window"))
self.fileDTMFAction.setAccel(self.__tr("Ctrl+D"))
self.fileDebugAction.setText(self.__tr("Debug Log"))
self.fileDebugAction.setMenuText(self.__tr("Debug Log"))
self.fileDebugAction.setToolTip(self.__tr("Show Debugging Log"))
self.fileDebugAction.setAccel(self.__tr("Ctrl+O"))
self.fileExitAction.setText(self.__tr("Exit"))
self.fileExitAction.setMenuText(self.__tr("Exit"))
self.fileExitAction.setAccel(QString.null)
self.helpAboutAction.setText(self.__tr("About"))
self.helpAboutAction.setMenuText(self.__tr("About"))
self.helpAboutAction.setAccel(QString.null)
self.editPreferencesAction.setText(self.__tr("Preferences"))
self.editPreferencesAction.setMenuText(self.__tr("Preferences"))
self.editPreferencesAction.setAccel(self.__tr("Ctrl+P"))
if self.MenuBar.findItem(1):
self.MenuBar.findItem(1).setText(self.__tr("File"))
if self.MenuBar.findItem(2):
self.MenuBar.findItem(2).setText(self.__tr("Edit"))
if self.MenuBar.findItem(3):
self.MenuBar.findItem(3).setText(self.__tr("Help"))
def fileDTMF(self):
print "ShtoomMainWindow.fileDTMF(): Not implemented yet"
def fileDebugging(self):
print "ShtoomMainWindow.fileDebugging(): Not implemented yet"
def fileExit(self):
print "ShtoomMainWindow.fileExit(): Not implemented yet"
def editPreferences(self):
print "ShtoomMainWindow.editPreferences(): Not implemented yet"
def helpAbout(self):
print "ShtoomMainWindow.helpAbout(): Not implemented yet"
def callButton_clicked(self):
print "ShtoomMainWindow.callButton_clicked(): Not implemented yet"
def hangupButton_clicked(self):
print "ShtoomMainWindow.hangupButton_clicked(): Not implemented yet"
def registerButton_clicked(self):
print "ShtoomMainWindow.registerButton_clicked(): Not implemented yet"
def lookupButton_clicked(self):
print "ShtoomMainWindow.lookupButton_clicked(): Not implemented yet"
def __tr(self,s,c = None):
return qApp.translate("ShtoomMainWindow",s,c)
| lgpl-2.1 |
hkariti/ansible | lib/ansible/modules/network/avi/avi_cluster.py | 26 | 3935 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cluster
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Cluster Avi RESTful Object
description:
    - This module is used to configure the Cluster object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
name:
description:
- Name of the object.
required: true
nodes:
description:
- List of clusternode.
rejoin_nodes_automatically:
description:
- Re-join cluster nodes automatically in the event one of the node is reset to factory.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
virtual_ip:
description:
- A virtual ip address.
- This ip address will be dynamically reconfigured so that it always is the ip of the cluster leader.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Cluster object
avi_cluster:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cluster
"""
RETURN = '''
obj:
description: Cluster (api/cluster) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
name=dict(type='str', required=True),
nodes=dict(type='list',),
rejoin_nodes_automatically=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
virtual_ip=dict(type='dict',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cluster',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
jiahaoliang/group-based-policy | gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/src/local.py | 101 | 1745 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Greenthread local storage of variables using weak references"""
import weakref
from eventlet import corolocal
class WeakLocal(corolocal.local):
def __getattribute__(self, attr):
rval = corolocal.local.__getattribute__(self, attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return corolocal.local.__setattr__(self, attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = corolocal.local
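# A minimal sketch of the weak-store behaviour above; the Context class and
# the attribute name are illustrative, not part of this module.
def _demo_weak_store():
    class Context(object):
        pass
    ctx = Context()
    weak_store.context = ctx       # stored internally as weakref.ref(ctx)
    assert weak_store.context is ctx
    del ctx                        # on CPython the referent is freed at once
    assert weak_store.context is None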
| apache-2.0 |
barnsnake351/nova | nova/tests/unit/scheduler/filters/test_availability_zone_filters.py | 57 | 2170 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import availability_zone_filter
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAvailabilityZoneFilter(test.NoDBTestCase):
def setUp(self):
super(TestAvailabilityZoneFilter, self).setUp()
self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
@staticmethod
def _make_zone_request(zone):
return {
'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'availability_zone': zone
}
}
}
def test_availability_zone_filter_same(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, request))
def test_availability_zone_filter_same_comma(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova,nova2'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(self.filt_cls.host_passes(host, request))
| apache-2.0 |
N3da/incubator-airflow | airflow/contrib/hooks/spark_submit_hook.py | 6 | 10915 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
import re
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
class SparkSubmitHook(BaseHook):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
It requires that the "spark-submit" binary is in the PATH or the spark_home to be
supplied.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the container running the job, separated by a
comma. For example hive-site.xml.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors (Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
    :param name: Name of the job (default: 'default-name')
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
"""
def __init__(self,
conf=None,
conn_id='spark_default',
files=None,
py_files=None,
jars=None,
java_class=None,
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
keytab=None,
principal=None,
name='default-name',
num_executors=None,
application_args=None,
verbose=False):
self._conf = conf
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._jars = jars
self._java_class = java_class
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._name = name
self._num_executors = num_executors
self._application_args = application_args
self._verbose = verbose
self._sp = None
self._yarn_application_id = None
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
def _resolve_connection(self):
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': 'spark-submit'}
try:
# Master can be local, yarn, spark://HOST:PORT or mesos://HOST:PORT
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = extra.get('spark-binary', 'spark-submit')
except AirflowException:
logging.debug(
"Could not load connection string {}, defaulting to {}".format(
self._conn_id, conn_data['master']
)
)
return conn_data
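    # Example (hypothetical values): a connection with host 'yarn' and extra
    # {"queue": "root.default", "deploy-mode": "cluster"} resolves to
    # master='yarn', queue='root.default' and deploy_mode='cluster'.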
def get_conn(self):
pass
def _build_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin', self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
        # The URL of the Spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
for arg in self._application_args:
                if len(arg.split()) > 1:
                    # split a multi-word argument into separate tokens
                    connection_cmd += arg.split()
                else:
                    connection_cmd += [arg]
logging.debug("Spark-Submit cmd: {}".format(connection_cmd))
return connection_cmd
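    # Illustrative result (values are made up): with master 'yarn', one conf
    # entry and a job name, the returned list resembles
    #   ['spark-submit', '--master', 'yarn',
    #    '--conf', 'spark.eventLog.enabled=true',
    #    '--name', 'my-job', 'my_app.py']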
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_command(application)
self._sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
# Using two iterators here to support 'real-time' logging
sources = [self._sp.stdout, self._sp.stderr]
for source in sources:
self._process_log(iter(source.readline, b''))
output, stderr = self._sp.communicate()
if self._sp.returncode:
raise AirflowException(
"Cannot execute: {}. Error code is: {}. Output: {}, Stderr: {}".format(
spark_submit_cmd, self._sp.returncode, output, stderr
)
)
def _process_log(self, itr):
"""
Processes the log files and extracts useful information out of it
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.decode('utf-8').strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
# Pass to logging
logging.info(line)
def on_kill(self):
if self._sp and self._sp.poll() is None:
logging.info('Sending kill signal to {}'.format(self._connection['spark_binary']))
self._sp.kill()
if self._yarn_application_id:
logging.info('Killing application on YARN')
kill_cmd = "yarn application -kill {0}".format(self._yarn_application_id).split()
yarn_kill = subprocess.Popen(kill_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.info("YARN killed with return code: {0}".format(yarn_kill.wait()))
| apache-2.0 |
NeilBryant/check_mk | doc/treasures/wato_geo_fields.py | 6 | 2020 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Place this file in ~/local/share/check_mk/web/plugins/wato to get two new fields in the WATO host properties.
# These fields can be used to add latitude and longitude information, which is useful for the NagVis Geomap.
declare_host_attribute(
NagiosTextAttribute(
"lat",
"_LAT",
"Latitude",
"Latitude",
),
show_in_table = False,
show_in_folder = False,
)
declare_host_attribute(
NagiosTextAttribute(
"long",
"_LONG",
"Longitude",
"Longitude",
),
show_in_table = False,
show_in_folder = False,
)
| gpl-2.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the ideal one, but still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
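# (Sanity check of the arithmetic above: 0.74851 / 0.25149 is roughly 2.976
#  and 512 / 4889 is roughly 0.105, so the typical ratio of 0.75 sits far
#  above the random baseline.)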
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purposes
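# A minimal usage sketch (illustrative only -- the variable names below are
# assumptions, not defined in this module): a character distribution analyser
# typically maps each decoded character position to its frequency rank via a
# table like this one, and counts the character as "frequent" only when that
# rank falls inside the first 512 entries, which is why everything past the
# "last 512" marker above can be ignored for detection:
#
#     rank = char_to_freq_order[char_index]  # frequency rank of this character
#     if rank < 512:                         # among the 512 most frequent?
#         freq_char_count += 1               # feeds the detector's confidence score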
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| apache-2.0 |
upliftaero/MissionPlanner | Lib/lib2to3/fixes/fix_exec.py | 61 | 1042 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for exec.
This converts usages of the exec statement into calls to a built-in
exec() function.
exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call
class FixExec(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
|
exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
"""
def transform(self, node, results):
assert results
syms = self.syms
a = results["a"]
b = results.get("b")
c = results.get("c")
args = [a.clone()]
args[0].prefix = ""
if b is not None:
args.extend([Comma(), b.clone()])
if c is not None:
args.extend([Comma(), c.clone()])
return Call(Name(u"exec"), args, prefix=node.prefix)
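# --- Illustrative sketch (not part of the upstream fixer) ---
# A hedged example of driving this fixer through lib2to3's refactoring
# machinery; the fixer name assumes the standard "lib2to3.fixes" layout.
def _example_run_fix_exec():
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_exec"])
    # "exec code in ns1, ns2" should come back as "exec(code, ns1, ns2)".
    tree = tool.refactor_string(u"exec code in ns1, ns2\n", "<example>")
    return str(tree)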
| gpl-3.0 |
pforret/python-for-android | python-modules/twisted/twisted/internet/epollreactor.py | 56 | 8121 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An epoll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import epollreactor
epollreactor.install()
"""
import sys, errno
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import _epoll
from twisted.python import log
from twisted.internet import posixbase, error
from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
_POLL_DISCONNECTED = (_epoll.HUP | _epoll.ERR)
class EPollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses epoll(4).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize epoll object, file descriptor tracking dictionaries, and the
base class.
"""
# Create the poller we're going to use. The 1024 here is just a hint
# to the kernel, it is not a hard maximum.
self._poller = _epoll.epoll(1024)
self._reads = {}
self._writes = {}
self._selectables = {}
posixbase.PosixReactorBase.__init__(self)
def _add(self, xer, primary, other, selectables, event, antievent):
"""
        Private method for adding a descriptor to the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example).
"""
fd = xer.fileno()
if fd not in primary:
cmd = _epoll.CTL_ADD
flags = event
if fd in other:
flags |= antievent
cmd = _epoll.CTL_MOD
# epoll_ctl can raise all kinds of IOErrors, and every one
# indicates a bug either in the reactor or application-code.
# Let them all through so someone sees a traceback and fixes
# something. We'll do the same thing for every other call to
# this method in this file.
self._poller._control(cmd, fd, flags)
# Update our own tracking state *only* after the epoll call has
# succeeded. Otherwise we may get out of sync.
primary[fd] = 1
selectables[fd] = xer
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._add(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def addWriter(self, writer):
"""
Add a FileDescriptor for notification of data available to write.
"""
self._add(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def _remove(self, xer, primary, other, selectables, event, antievent):
"""
Private method for removing a descriptor from the event loop.
        It does the inverse job of _add, and also adds a check in case the fd
        has gone away.
"""
fd = xer.fileno()
if fd == -1:
for fd, fdes in selectables.items():
if xer is fdes:
break
else:
return
if fd in primary:
cmd = _epoll.CTL_DEL
flags = event
if fd in other:
flags = antievent
cmd = _epoll.CTL_MOD
else:
del selectables[fd]
del primary[fd]
# See comment above _control call in _add.
self._poller._control(cmd, fd, flags)
def removeReader(self, reader):
"""
Remove a Selectable for notification of data available to read.
"""
self._remove(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def removeWriter(self, writer):
"""
Remove a Selectable for notification of data available to write.
"""
self._remove(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(
[self._selectables[fd] for fd in self._reads],
[self._selectables[fd] for fd in self._writes])
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def doPoll(self, timeout):
"""
Poll the poller for new events.
"""
if timeout is None:
timeout = 1
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
# Limit the number of events to the number of io objects we're
# currently tracking (because that's maybe a good heuristic) and
# the amount of time we block to the value specified by our
# caller.
l = self._poller.wait(len(self._selectables), timeout)
except IOError, err:
if err.errno == errno.EINTR:
return
# See epoll_wait(2) for documentation on the other conditions
# under which this can fail. They can only be due to a serious
# programming error on our part, so let's just announce them
# loudly.
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
pass
else:
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
"""
        fd is available for read or write; do the work and raise errors
if necessary.
"""
why = None
inRead = False
if event & _POLL_DISCONNECTED and not (event & _epoll.IN):
if fd in self._reads:
inRead = True
why = CONNECTION_DONE
else:
why = CONNECTION_LOST
else:
try:
if event & _epoll.IN:
why = selectable.doRead()
inRead = True
if not why and event & _epoll.OUT:
why = selectable.doWrite()
inRead = False
if selectable.fileno() != fd:
why = error.ConnectionFdescWentAway(
'Filedescriptor went away')
inRead = False
except:
log.err()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def install():
"""
Install the epoll() reactor.
"""
p = EPollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["EPollReactor", "install"]
| apache-2.0 |
chand3040/sree_odoo | openerp/addons/account/company.py | 384 | 2814 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
'tax_calculation_rounding_method': fields.selection([
('round_per_line', 'Round per Line'),
('round_globally', 'Round Globally'),
], 'Tax Calculation Rounding Method',
help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
'overdue_msg': fields.text('Overdue Payments Message', translate=True),
}
_defaults = {
'expects_chart_of_accounts': True,
'tax_calculation_rounding_method': 'round_per_line',
'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, Please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
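# --- Illustrative sketch (not part of the upstream module) ---
# A hedged numeric illustration of the help text above: three order lines
# of 1.13 taxed at 21% differ by one cent between the two rounding methods.
def _example_tax_rounding_methods():
    lines = [1.13, 1.13, 1.13]
    rate = 0.21
    round_per_line = sum(round(amount * rate, 2) for amount in lines)  # 3 * 0.24 = 0.72
    round_globally = round(sum(amount * rate for amount in lines), 2)  # round(0.7119, 2) = 0.71
    return round_per_line, round_globally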
| agpl-3.0 |
ThePletch/ansible | lib/ansible/modules/cloud/amazon/ec2_win_password.py | 23 | 5690 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances
description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
key_file:
description:
- Path to the file containing the key pair used on the instance.
required: true
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
wait:
version_added: "2.0"
description:
- Whether or not to wait for the password to be available before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
version_added: "2.0"
description:
- Number of seconds to wait before giving up.
required: false
default: 120
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Example of getting a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
# Example of getting a password with a password protected key
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_protected_test_key.pem"
key_passphrase: "secret"
# Example of waiting for a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
wait: yes
wait_timeout: 45
'''
from base64 import b64decode
from os.path import expanduser
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import datetime
import time
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True),
key_passphrase = dict(no_log=True, default=None, required=False),
wait = dict(type='bool', default=False, required=False),
wait_timeout = dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='Boto required for this module.')
instance_id = module.params.get('instance_id')
key_file = expanduser(module.params.get('key_file'))
key_passphrase = module.params.get('key_passphrase')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
ec2 = ec2_connect(module)
if wait:
start = datetime.datetime.now()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.now() < end:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and not decoded:
time.sleep(5)
else:
break
else:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
try:
f = open(key_file, 'r')
except IOError as e:
module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
else:
try:
with f:
key = RSA.importKey(f.read(), key_passphrase)
except (ValueError, IndexError, TypeError) as e:
module.fail_json(msg = "unable to parse key file")
cipher = PKCS1_v1_5.new(key)
sentinel = 'password decryption failed!!!'
try:
decrypted = cipher.decrypt(decoded, sentinel)
except ValueError as e:
decrypted = None
    if decrypted is None:
module.exit_json(win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
else:
module.exit_json(win_password=decrypted, changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
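# --- Illustrative sketch (not part of the upstream module) ---
# A hedged distillation of the decryption performed in main(): EC2 hands
# back the Administrator password base64-encoded and RSA-encrypted with
# the instance's key pair, so decode-then-PKCS#1-v1.5-decrypt recovers it.
# Reuses the RSA/PKCS1_v1_5/b64decode imports above.
def _example_decrypt_password_data(b64_password_data, pem_key_text, passphrase=None):
    key = RSA.importKey(pem_key_text, passphrase)
    cipher = PKCS1_v1_5.new(key)
    sentinel = None  # returned by decrypt() when decryption fails
    return cipher.decrypt(b64decode(b64_password_data), sentinel)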
| gpl-3.0 |
dbones/linux | tools/perf/python/twatch.py | 1565 | 1316 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
grzes/djangae | djangae/fields/charfields.py | 2 | 1862 | from django.core.exceptions import ImproperlyConfigured
from django.db import models
from djangae.core import validators
from google.appengine.api.datastore_types import _MAX_STRING_LENGTH
class CharOrNoneField(models.CharField):
""" A field that stores only non-empty strings or None (it won't store empty strings).
This is useful if you want values to be unique but also want to allow empty values.
"""
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
# Don't allow null=False because that would be insane.
if not kwargs.get('null', True):
raise ImproperlyConfigured("You can't set null=False on a CharOrNoneField.")
# Set blank=True as the default, but allow it to be overridden, as it's theoretically
# possible that you might want to prevent emptiness only in a form
defaults = dict(null=True, blank=True, default=None)
defaults.update(**kwargs)
super(CharOrNoneField, self).__init__(*args, **defaults)
def pre_save(self, model_instance, add):
value = super(CharOrNoneField, self).pre_save(model_instance, add)
# Change empty strings to None
if not value:
return None
return value
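# --- Illustrative sketch (not part of the upstream module) ---
# Hedged usage: "Profile" and "nickname" are hypothetical; unique=True
# stays usable because blank values are stored as None/NULL rather than
# "", and NULLs do not collide with each other on a unique constraint.
#
#     class Profile(models.Model):          # hypothetical model
#         nickname = CharOrNoneField(max_length=30, unique=True)
#
#     Profile(nickname="").save()   # persisted as None/NULL
#     Profile(nickname="").save()   # no unique violation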
class CharField(models.CharField):
def __init__(self, max_length=_MAX_STRING_LENGTH, *args, **kwargs):
assert max_length <= _MAX_STRING_LENGTH, \
"%ss max_length must not be grater than %d bytes." % (self.__class__.__name__, _MAX_STRING_LENGTH)
super(CharField, self).__init__(max_length=max_length, *args, **kwargs)
# Append the MaxBytesValidator if it's not been included already
self.validators = [
x for x in self.validators if not isinstance(x, validators.MaxBytesValidator)
] + [validators.MaxBytesValidator(limit_value=max_length)]
| bsd-3-clause |
wolfe-pack/moro | public/javascripts/brat/tools/norm_db_lookup.py | 3 | 4451 | #!/usr/bin/env python
# Test script for lookup in a normalization SQL DB, intended for
# DB testing.
# TODO: duplicates parts of primary norm DB implementation, dedup.
import sys
import os.path
import sqlite3 as sqlite
TYPE_TABLES = ["names", "attributes", "infos"]
NON_EMPTY_TABLES = set(["names"])
def argparser():
import argparse
ap=argparse.ArgumentParser(description="Print results of lookup in normalization SQL DB for keys read from STDIN.")
ap.add_argument("-v", "--verbose", default=False, action="store_true", help="Verbose output.")
ap.add_argument("-np", "--no-prompt", default=False, action="store_true", help="No prompt.")
ap.add_argument("database", metavar="DATABASE", help="Name of database to read")
return ap
def string_norm_form(s):
return s.lower().strip().replace('-', ' ')
def datas_by_ids(cursor, ids):
# select separately from names, attributes and infos
responses = {}
for table in TYPE_TABLES:
command = '''
SELECT E.uid, L.text, N.value
FROM entities E
JOIN %s N
ON E.id = N.entity_id
JOIN labels L
ON L.id = N.label_id
WHERE E.uid IN (%s)''' % (table, ','.join(['?' for i in ids]))
cursor.execute(command, list(ids))
response = cursor.fetchall()
# group by ID first
for id_, label, value in response:
if id_ not in responses:
responses[id_] = {}
if table not in responses[id_]:
responses[id_][table] = []
responses[id_][table].append([label, value])
# short-circuit on missing or incomplete entry
if (table in NON_EMPTY_TABLES and
            len([i for i in responses if len(responses[i].get(table, [])) == 0]) != 0):
return None
# empty or incomplete?
for id_ in responses:
for t in NON_EMPTY_TABLES:
            if len(responses[id_].get(t, [])) == 0:
return None
# has expected content, format and return
datas = {}
for id_ in responses:
datas[id_] = []
for t in TYPE_TABLES:
datas[id_].append(responses[id_].get(t,[]))
return datas
def ids_by_name(cursor, name, exactmatch=False, return_match=False):
return ids_by_names(cursor, [name], exactmatch, return_match)
def ids_by_names(cursor, names, exactmatch=False, return_match=False):
if not return_match:
command = 'SELECT E.uid'
else:
command = 'SELECT E.uid, N.value'
command += '''
FROM entities E
JOIN names N
ON E.id = N.entity_id
'''
if exactmatch:
command += 'WHERE N.value IN (%s)' % ','.join(['?' for n in names])
else:
command += 'WHERE N.normvalue IN (%s)' % ','.join(['?' for n in names])
names = [string_norm_form(n) for n in names]
cursor.execute(command, names)
responses = cursor.fetchall()
if not return_match:
return [r[0] for r in responses]
else:
return [(r[0],r[1]) for r in responses]
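# --- Illustrative sketch (not part of the upstream script) ---
# The schema below is inferred from the SELECTs above and is an
# assumption, not the authoritative brat DDL: entities keyed by uid,
# one row per (entity, label) pair in names/attributes/infos, and a
# shared labels table.
def _example_create_schema(cursor):
    cursor.executescript('''
        CREATE TABLE entities (id INTEGER PRIMARY KEY, uid TEXT);
        CREATE TABLE labels (id INTEGER PRIMARY KEY, text TEXT);
        CREATE TABLE names (entity_id INTEGER, label_id INTEGER,
                            value TEXT, normvalue TEXT);
        CREATE TABLE attributes (entity_id INTEGER, label_id INTEGER,
                                 value TEXT);
        CREATE TABLE infos (entity_id INTEGER, label_id INTEGER,
                            value TEXT);
    ''')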
def main(argv):
arg = argparser().parse_args(argv[1:])
# try a couple of alternative locations based on the given DB
# name: name as path, name as filename in work dir, and name as
# filename without suffix in work dir
dbn = arg.database
dbpaths = [dbn, os.path.join('work', dbn), os.path.join('work', dbn)+'.db']
dbfn = None
for p in dbpaths:
if os.path.exists(p):
dbfn = p
break
if dbfn is None:
print >> sys.stderr, "Error: %s: no such file" % dbfn
return 1
try:
connection = sqlite.connect(dbfn)
except sqlite.OperationalError, e:
print >> sys.stderr, "Error connecting to DB %s:" % dbfn, e
return 1
cursor = connection.cursor()
while True:
if not arg.no_prompt:
print ">>> ",
l = sys.stdin.readline()
if not l:
break
l = l.rstrip()
try:
r = ids_by_name(cursor, l)
if len(r) != 0:
d = datas_by_ids(cursor, r)
for i in d:
print i+'\t', '\t'.join([' '.join(["%s:%s" % (k,v) for k,v in a]) for a in d[i]])
elif l == '':
print "(Use Ctrl-D to exit)"
else:
print "(no record found for '%s')" % l
except Exception, e:
print >> sys.stderr, "Unexpected error", e
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause |
goodwinnk/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/bzr.py | 94 | 11295 | # bzr.py - bzr support for the convert extension
#
# Copyright 2008, 2009 Marek Kubica <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
# it cannot access 'bar' repositories, but they were never used very much
import os
from mercurial import demandimport
# these do not work with demandimport, blacklist
demandimport.ignore.extend([
'bzrlib.transactions',
'bzrlib.urlutils',
'ElementPath',
])
from mercurial.i18n import _
from mercurial import util
from common import NoRepo, commit, converter_source
try:
# bazaar imports
from bzrlib import bzrdir, revision, errors
from bzrlib.revisionspec import RevisionSpec
except ImportError:
pass
supportedkinds = ('file', 'symlink')
class bzr_source(converter_source):
"""Reads Bazaar repositories by using the Bazaar Python libraries"""
def __init__(self, ui, path, rev=None):
super(bzr_source, self).__init__(ui, path, rev=rev)
if not os.path.exists(os.path.join(path, '.bzr')):
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
try:
# access bzrlib stuff
bzrdir
except NameError:
raise NoRepo(_('Bazaar modules could not be loaded'))
path = os.path.abspath(path)
self._checkrepotype(path)
try:
self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
except errors.NoRepositoryPresent:
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
self._parentids = {}
def _checkrepotype(self, path):
# Lightweight checkouts detection is informational but probably
# fragile at API level. It should not terminate the conversion.
try:
from bzrlib import bzrdir
dir = bzrdir.BzrDir.open_containing(path)[0]
try:
tree = dir.open_workingtree(recommend_upgrade=False)
branch = tree.branch
except (errors.NoWorkingTree, errors.NotLocalUrl):
tree = None
branch = dir.open_branch()
if (tree is not None and tree.bzrdir.root_transport.base !=
branch.bzrdir.root_transport.base):
self.ui.warn(_('warning: lightweight checkouts may cause '
'conversion failures, try with a regular '
'branch instead.\n'))
except Exception:
self.ui.note(_('bzr source type could not be determined\n'))
def before(self):
"""Before the conversion begins, acquire a read lock
for all the operations that might need it. Fortunately
read locks don't block other reads or writes to the
repository, so this shouldn't have any impact on the usage of
the source repository.
The alternative would be locking on every operation that
needs locks (there are currently two: getting the file and
getting the parent map) and releasing immediately after,
but this approach can take even 40% longer."""
self.sourcerepo.lock_read()
def after(self):
self.sourcerepo.unlock()
def _bzrbranches(self):
return self.sourcerepo.find_branches(using=True)
def getheads(self):
if not self.rev:
# Set using=True to avoid nested repositories (see issue3254)
heads = sorted([b.last_revision() for b in self._bzrbranches()])
else:
revid = None
for branch in self._bzrbranches():
try:
r = RevisionSpec.from_string(self.rev)
info = r.in_history(branch)
except errors.BzrError:
pass
revid = info.rev_id
if revid is None:
raise util.Abort(_('%s is not a valid revision') % self.rev)
heads = [revid]
# Empty repositories return 'null:', which cannot be retrieved
heads = [h for h in heads if h != 'null:']
return heads
def getfile(self, name, rev):
revtree = self.sourcerepo.revision_tree(rev)
fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
kind = None
if fileid is not None:
kind = revtree.kind(fileid)
if kind not in supportedkinds:
# the file is not available anymore - was deleted
raise IOError(_('%s is not available in %s anymore') %
(name, rev))
mode = self._modecache[(name, rev)]
if kind == 'symlink':
target = revtree.get_symlink_target(fileid)
if target is None:
raise util.Abort(_('%s.%s symlink has no target')
% (name, rev))
return target, mode
else:
sio = revtree.get_file(fileid)
return sio.read(), mode
def getchanges(self, version):
# set up caches: modecache and revtree
self._modecache = {}
self._revtree = self.sourcerepo.revision_tree(version)
# get the parentids from the cache
parentids = self._parentids.pop(version)
# only diff against first parent id
prevtree = self.sourcerepo.revision_tree(parentids[0])
return self._gettreechanges(self._revtree, prevtree)
def getcommit(self, version):
rev = self.sourcerepo.get_revision(version)
# populate parent id cache
if not rev.parent_ids:
parents = []
self._parentids[version] = (revision.NULL_REVISION,)
else:
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
branch = self.recode(rev.properties.get('branch-nick', u'default'))
if branch == 'trunk':
branch = 'default'
return commit(parents=parents,
date='%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
desc=self.recode(rev.message),
branch=branch,
rev=version)
def gettags(self):
bytetags = {}
for branch in self._bzrbranches():
if not branch.supports_tags():
return {}
tagdict = branch.tags.get_tag_dict()
for name, rev in tagdict.iteritems():
bytetags[self.recode(name)] = rev
return bytetags
def getchangedfiles(self, rev, i):
self._modecache = {}
curtree = self.sourcerepo.revision_tree(rev)
if i is not None:
parentid = self._parentids[rev][i]
else:
# no parent id, get the empty revision
parentid = revision.NULL_REVISION
prevtree = self.sourcerepo.revision_tree(parentid)
changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
return changes
def _gettreechanges(self, current, origin):
revid = current._revision_id
changes = []
renames = {}
seen = set()
# Process the entries by reverse lexicographic name order to
# handle nested renames correctly, most specific first.
curchanges = sorted(current.iter_changes(origin),
key=lambda c: c[1][0] or c[1][1],
reverse=True)
for (fileid, paths, changed_content, versioned, parent, name,
kind, executable) in curchanges:
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
continue
# bazaar tracks directories, mercurial does not, so
# we have to rename the directory contents
if kind[1] == 'directory':
if kind[0] not in (None, 'directory'):
# Replacing 'something' with a directory, record it
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
if kind[0] == 'directory' and None not in paths:
renaming = paths[0] != paths[1]
# neither an add nor an delete - a move
# rename all directory contents manually
subdir = origin.inventory.path2id(paths[0])
# get all child-entries of the directory
for name, entry in origin.inventory.iter_entries(subdir):
# hg does not track directory renames
if entry.kind == 'directory':
continue
frompath = self.recode(paths[0] + '/' + name)
if frompath in seen:
# Already handled by a more specific change entry
# This is important when you have:
# a => b
# a/c => a/c
# Here a/c must not be renamed into b/c
continue
seen.add(frompath)
if not renaming:
continue
topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
changes.append((topath, revid))
# add to mode cache
mode = ((entry.executable and 'x')
or (entry.kind == 'symlink' and 's')
or '')
self._modecache[(topath, revid)] = mode
# register the change as move
renames[topath] = frompath
# no further changes, go to the next change
continue
# we got unicode paths, need to convert them
path, topath = paths
if path is not None:
path = self.recode(path)
if topath is not None:
topath = self.recode(topath)
seen.add(path or topath)
if topath is None:
# file deleted
changes.append((path, revid))
continue
# renamed
if path and path != topath:
renames[topath] = path
changes.append((path, revid))
# populate the mode cache
kind, executable = [e[1] for e in (kind, executable)]
mode = ((executable and 'x') or (kind == 'symlink' and 'l')
or '')
self._modecache[(topath, revid)] = mode
changes.append((topath, revid))
return changes, renames
def _filterghosts(self, ids):
"""Filters out ghost revisions which hg does not support, see
<http://bazaar-vcs.org/GhostRevision>
"""
parentmap = self.sourcerepo.get_parent_map(ids)
parents = tuple([parent for parent in ids if parent in parentmap])
return parents
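# --- Illustrative sketch (not part of the upstream module) ---
# A hedged, pure-Python illustration of the directory-rename expansion
# performed in _gettreechanges: bzr records "a => b" once for the
# directory, while hg needs one rename entry per tracked file underneath.
def _example_expand_directory_rename(old_dir, new_dir, files_under_dir):
    renames = {}
    for name in files_under_dir:
        renames[new_dir + '/' + name] = old_dir + '/' + name
    return renames

# _example_expand_directory_rename('a', 'b', ['c', 'd/e'])
# -> {'b/c': 'a/c', 'b/d/e': 'a/d/e'}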
| apache-2.0 |
glaubitz/fs-uae-debian | launcher/arcade/arcade_main.py | 2 | 4015 | from arcade.Application import Application
from arcade.glui.imageloader import ImageLoader
from arcade.ui.arcade_window import (
ArcadeWindow,
check_argument,
fullscreen,
maximized,
)
from fsbc.settings import Settings
from fsbc.system import macosx
from .gnome3 import running_in_gnome_3, handle_gnome_extensions
import launcher.version
from fsbc.init import initialize_application
K_UI_MODE_ALL_HIDDEN = 3
K_UI_OPTION_AUTO_SHOW_MENU_BAR = 1 << 0
def os_x_set_system_ui_mode(mode, option):
# noinspection PyUnresolvedReferences
import objc
# noinspection PyUnresolvedReferences
from Foundation import NSBundle
bundle = NSBundle.bundleWithPath_(
"/System/Library/Frameworks/Carbon.framework"
)
objc.loadBundleFunctions(
bundle, globals(), (("SetSystemUIMode", b"III", ""),)
)
# noinspection PyUnresolvedReferences
SetSystemUIMode(mode, option)
def main():
application = Application()
initialize_application("fs-uae-arcade", version=launcher.version.VERSION)
# fs_width, fs_height = fsui.get_screen_size()
# cursor_position = None
# use_window = False
# use_window_decorations = True
# use_fullscreen = True
# use_fullscreen_window = False
# use_top_clock = check_argument("top_clock") != "0"
# use_top_logo = check_argument("top_logo") != "0"
if macosx:
if fullscreen() or maximized():
if check_argument("system_autohide") == "1":
os_x_set_system_ui_mode(
K_UI_MODE_ALL_HIDDEN, K_UI_OPTION_AUTO_SHOW_MENU_BAR
)
elif running_in_gnome_3():
if fullscreen() or maximized():
# use_fullscreen = False
# use_window_decorations = False
# use_window = "maximized"
if check_argument("system_autohide") == "1":
handle_gnome_extensions()
# cursor_position = fs_width - 1, fs_height - 1
# use_top_clock = False
# use_top_logo = False
# app.settings["fs-uae:fullscreen-mode::default"] = "window"
else:
# We want a normal window.
pass
Settings.instance().set("__arcade", "1")
# if windows:
# pass
# elif macosx:
# # use_fullscreen_window = True
# # Settings.instance().set("__fullscreen_mode", "window")
# pass
# else:
# # app.settings["fs-uae:fullscreen-mode::default"] = "window"
# pass
# if check_argument("fullscreen"):
# use_fullscreen = check_argument("fullscreen") == "1"
#
# if "--fullscreen-mode=fullscreen" in sys.argv:
# use_fullscreen_window = False
# elif "--fullscreen-mode=window" in sys.argv:
# use_fullscreen_window = True
#
# if "--maximize" in sys.argv:
# use_window = "maximized"
# use_fullscreen = False
#
# if "--no-window-decorations" in sys.argv:
# use_window_decorations = False
# app.settings["game-center:fullscreen"] = \
# "1" if use_fullscreen else "0"
# if use_fullscreen_window:
# app.settings["game-center:fullscreen-mode"] = "window"
# else:
# app.settings["game-center:fullscreen-mode"] = ""
# app.settings["game-center:window-decorations"] = \
# "1" if use_window_decorations else "0"
# app.settings["game-center:maximize"] = \
# "1" if use_window == "maximized" else "0"
# app.settings["game-center:top-clock"] = "1" if use_top_clock else "0"
# app.settings["game-center:top-logo"] = "1" if use_top_logo else "0"
ArcadeWindow().show_auto()
# if cursor_position is not None:
# os.environ["FSGS_RETURN_CURSOR_TO"] = "{0},{1}".format(
# cursor_position[0], cursor_position[1])
application.run()
print("application.run returned")
application.stop()
ImageLoader.get().stop()
application.wait()
print(" --- arcade.arcade_main.main is done ---")
return
| gpl-2.0 |
PfarrCh/openrsa | test/solovaystrassen_testcase.py | 1 | 1957 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Christian Pfarr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
from solovaystrassen import SolovayStrassen
class SolovayStrassenTestCase(unittest.TestCase):
def setUp(self):
self.a1 = 17
self.a2 = 29
self.a3 = 23
self.maybe = 91
def tearDown(self):
del self.a1
del self.a2
del self.a3
del self.maybe
def test_composition(self):
self.assertTrue(not SolovayStrassen.is_composite(self.a1, self.maybe),
"SolovayStrassen detects a composition, but it could be prime")
self.assertTrue(not SolovayStrassen.is_composite(self.a2, self.maybe),
"SolovayStrassen detects a composition, but it could be prime")
self.assertTrue(SolovayStrassen.is_composite(self.a3, self.maybe),
"SolovayStrassen detects no composition, but it is one")
| mit |
Ichimonji10/robottelo | tests/foreman/performance/test_candlepin_concurrent_subscription_attach.py | 2 | 3812 | """Test class for concurrent subscription by register and attach
@Requirement: Candlepin concurrent subscription attach
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: OTHER
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from robottelo.performance.constants import (
ATTACH_ENV,
RAW_ATT_FILE_NAME,
RAW_REG_FILE_NAME,
STAT_ATT_FILE_NAME,
STAT_REG_FILE_NAME,
)
from robottelo.test import ConcurrentTestCase
class ConcurrentSubAttachTestCase(ConcurrentTestCase):
"""Concurrent Subscribe to Red Hat Satellite 6 Server by attach tests"""
@classmethod
def setUpClass(cls):
super(ConcurrentSubAttachTestCase, cls).setUpClass()
# parameters for concurrent register and attach test
# note: may need to change savepoint name
cls._set_testcase_parameters(
'enabled_repos',
RAW_ATT_FILE_NAME,
STAT_ATT_FILE_NAME,
raw_reg=RAW_REG_FILE_NAME,
stat_reg=STAT_REG_FILE_NAME,
)
# parameters for attach step
cls.environment = ATTACH_ENV
def setUp(self):
super(ConcurrentSubAttachTestCase, self).setUp()
# Get subscription id
(self.sub_id, sub_name) = self._get_subscription_id()
self.logger.debug(
'subscription {0} id is: {1}'.format(sub_name, self.sub_id))
def test_subscribe_ak_sequential(self):
"""Subscribe system sequentially using 1 virtual machine
@id: 41d80f4f-60df-4a49-967c-929604ca156e
@Steps:
1. create result dictionary
2. sequentially run by one thread;
the thread iterates all total number of iterations
3. produce result of timing
@Assert: Restoring where there's no system registered
"""
self.kick_off_ak_test(self.num_threads[0], 5000)
def test_register_attach_2_clients(self):
"""Subscribe system concurrently using 2 virtual machines
@id: 9849c556-c2a7-4ae3-a7b7-5291bdf158fd
@Steps:
1. create result dictionary
2. concurrent run by multiple threads;
each thread iterates a limited number of times
3. produce result of timing
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[1], 5000)
def test_register_attach_4_clients(self):
"""Subscribe system concurrently using 4 virtual machines
@id: dfc7da77-6127-42ee-bbaa-4e3b48c86c9d
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[2], 5000)
def test_register_attach_6_clients(self):
"""Subscribe system concurrently using 6 virtual machines
@id: 1a03261a-2756-4ea2-a718-86b5cfa9bd87
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[3], 6000)
def test_register_attach_8_clients(self):
"""Subscribe system concurrently using 8 virtual machines
@id: fc5049b1-93ba-4cba-854f-bb763d137832
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[4], 5000)
def test_register_attach_10_clients(self):
"""Subscribe system concurrently using 10 virtual machines
@id: a7ce9e04-b9cc-4c2b-b9e8-22ea8ceb1fab
@Steps:
1. create result dictionary
2. concurrent run by multiple threads;
and each thread iterates a limited number of times
3. produce result of timing
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[5], 5000)
| gpl-3.0 |
bjodah/symengine.py | symengine/tests/test_sets.py | 2 | 3799 | from symengine.utilities import raises
from symengine.lib.symengine_wrapper import (Interval, EmptySet, UniversalSet,
FiniteSet, Union, Complement, ImageSet, ConditionSet, Reals, Integers,
And, Or, oo, Symbol, true, Ge, Eq, Gt)
def test_Interval():
assert Interval(0, oo) == Interval(0, oo, False, True)
assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
assert Interval(oo, -oo) == EmptySet()
assert Interval(oo, oo) == EmptySet()
assert Interval(-oo, -oo) == EmptySet()
assert isinstance(Interval(1, 1), FiniteSet)
assert Interval(1, 0) == EmptySet()
assert Interval(1, 1, False, True) == EmptySet()
assert Interval(1, 1, True, False) == EmptySet()
assert Interval(1, 1, True, True) == EmptySet()
assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
def test_EmptySet():
E = EmptySet()
assert E.intersection(UniversalSet()) == E
def test_UniversalSet():
U = UniversalSet()
x = Symbol("x")
assert U.union(Interval(2, 4)) == U
assert U.intersection(Interval(2, 4)) == Interval(2, 4)
assert U.contains(0) == true
def test_Reals():
R = Reals()
assert R.union(Interval(2, 4)) == R
assert R.contains(0) == true
def test_Integers():
Z = Integers()
assert Z.union(FiniteSet(2, 4)) == Z
assert Z.contains(0) == true
def test_FiniteSet():
x = Symbol("x")
A = FiniteSet(1, 2, 3)
B = FiniteSet(3, 4, 5)
AorB = Union(A, B)
AandB = A.intersection(B)
assert AandB == FiniteSet(3)
assert FiniteSet(EmptySet()) != EmptySet()
assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
def test_Union():
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
Interval(1, 3, False, True)
assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
Interval(1, 3, True, True)
assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
Interval(1, 3)
assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
Interval(1, 3)
assert Union(Interval(1, 2), EmptySet()) == Interval(1, 2)
assert Union(EmptySet()) == EmptySet()
def test_Complement():
assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
assert Complement(Union(Interval(0, 2),
FiniteSet(2, 3, 4)), Interval(1, 3)) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
def test_ConditionSet():
x = Symbol("x")
i1 = Interval(-oo, oo)
f1 = FiniteSet(0, 1, 2, 4)
cond1 = Ge(x**2, 9)
assert ConditionSet(x, And(Eq(0, 1), i1.contains(x))) == EmptySet()
assert ConditionSet(x, And(Gt(1, 0), i1.contains(x))) == i1
assert ConditionSet(x, And(cond1, f1.contains(x))) == FiniteSet(4)
def test_ImageSet():
x = Symbol("x")
i1 = Interval(0, 1)
assert ImageSet(x, x**2, EmptySet()) == EmptySet()
assert ImageSet(x, 1, i1) == FiniteSet(1)
assert ImageSet(x, x, i1) == i1
| mit |
simartin/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py | 14 | 3726 | # -*- coding: utf-8 -*-
"""
Client Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In product code you will want to use an actual
HTTP/1.1 client if possible.
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 80))
def send_initial_request(connection, settings):
"""
For the sake of this upgrade demonstration, we're going to issue a GET
request against the root of the site. In principle the best request to
issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
poorly supported and can break in weird ways.
"""
# Craft our initial request per RFC 7540 Section 3.2. This requires two
    # special header fields: the Upgrade header, and the HTTP2-Settings header.
# The value of the HTTP2-Settings header field comes from h2.
request = (
b"GET / HTTP/1.1\r\n" +
b"Host: localhost\r\n" +
b"Upgrade: h2c\r\n" +
b"HTTP2-Settings: " + settings + b"\r\n" +
b"\r\n"
)
connection.sendall(request)
def get_upgrade_response(connection):
"""
This function reads from the socket until the HTTP/1.1 end-of-headers
sequence (CRLFCRLF) is received. It then checks what the status code of the
response is.
This is not a substitute for proper HTTP/1.1 parsing, but it's good enough
for example purposes.
"""
data = b''
while b'\r\n\r\n' not in data:
data += connection.recv(8192)
headers, rest = data.split(b'\r\n\r\n', 1)
# An upgrade response begins HTTP/1.1 101 Switching Protocols. Look for the
# code. In production code you should also check that the upgrade is to
# h2c, but here we know we only offered one upgrade so there's only one
# possible upgrade in use.
split_headers = headers.split()
if split_headers[1] != b'101':
raise RuntimeError("Not upgrading!")
# We don't care about the HTTP/1.1 data anymore, but we do care about
# any other data we read from the socket: this is going to be HTTP/2 data
# that must be passed to the H2Connection.
return rest
def main():
"""
The client upgrade flow.
"""
    # Step 1: Establish the TCP connection.
connection = establish_tcp_connection()
# Step 2: Create H2 Connection object, put it in upgrade mode, and get the
# value of the HTTP2-Settings header we want to use.
h2_connection = h2.connection.H2Connection()
settings_header_value = h2_connection.initiate_upgrade_connection()
# Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
send_initial_request(connection, settings_header_value)
# Step 4: Read the HTTP/1.1 response, look for 101 response.
extra_data = get_upgrade_response(connection)
# Step 5: Immediately send the pending HTTP/2 data.
connection.sendall(h2_connection.data_to_send())
# Step 6: Feed the body data to the connection.
events = connection.receive_data(extra_data)
# Now you can enter your main loop, beginning by processing the first set
# of events above. These events may include ResponseReceived, which will
# contain the response to the request we made in Step 3.
main_loop(events)
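# --- Illustrative sketch (not part of the upstream fragment) ---
# main_loop() is referenced above but intentionally left out of this
# fragment; a hedged, minimal stand-in just inspects the events. Real
# code would keep recv()ing, feed the bytes to
# h2_connection.receive_data(), and dispatch each event (for example
# ResponseReceived).
def main_loop(events):
    for event in events:
        print(event)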
| mpl-2.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py | 123 | 2686 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.platform import app
FLAGS = None
def print_tensors_in_checkpoint_file(file_name, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
"""
try:
if not tensor_name:
variables = checkpoint_utils.list_variables(file_name)
for name, shape in variables:
print("%s\t%s" % (name, str(shape)))
else:
print("tensor_name: ", tensor_name)
print(checkpoint_utils.load_variable(file_name, tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
"or directory> [--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--file_name",
type=str,
default="",
help="Checkpoint filename"
)
parser.add_argument(
"--tensor_name",
type=str,
default="",
help="Name of the tensor to inspect"
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| bsd-2-clause |
thienluong/SAFplus-Availability-Scalability-Platform | src/ide/genshi/build/lib.linux-x86_64-2.7/genshi/template/tests/base.py | 25 | 1473 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.template.base import Template, Context
class ContextTestCase(unittest.TestCase):
def test_copy(self):
# create a non-trivial context with some dummy
# frames, match templates and py:choice stacks.
orig_ctxt = Context(a=5, b=6)
orig_ctxt.push({'c': 7})
orig_ctxt._match_templates.append(object())
orig_ctxt._choice_stack.append(object())
ctxt = orig_ctxt.copy()
self.assertNotEqual(id(orig_ctxt), id(ctxt))
self.assertEqual(repr(orig_ctxt), repr(ctxt))
self.assertEqual(orig_ctxt._match_templates, ctxt._match_templates)
self.assertEqual(orig_ctxt._choice_stack, ctxt._choice_stack)
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(Template.__module__))
suite.addTest(unittest.makeSuite(ContextTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-2.0 |
ahmed-mahran/hue | desktop/core/ext-py/South-1.0.2/south/utils/__init__.py | 119 | 1945 | """
Generally helpful utility functions.
"""
def _ask_for_it_by_name(name):
"Returns an object referenced by absolute path."
bits = str(name).split(".")
## what if there is no absolute reference?
if len(bits) > 1:
modulename = ".".join(bits[:-1])
else:
modulename = bits[0]
module = __import__(modulename, {}, {}, bits[-1])
if len(bits) == 1:
return module
else:
return getattr(module, bits[-1])
def ask_for_it_by_name(name):
"Returns an object referenced by absolute path. (Memoised outer wrapper)"
if name not in ask_for_it_by_name.cache:
ask_for_it_by_name.cache[name] = _ask_for_it_by_name(name)
return ask_for_it_by_name.cache[name]
ask_for_it_by_name.cache = {}
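# Illustrative behaviour of the resolver above:
#   ask_for_it_by_name("os.path.join") is os.path.join   # -> True, and cached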
def get_attribute(item, attribute):
"""
Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.)
"""
value = item
for part in attribute.split("."):
value = getattr(value, part)
return value
def auto_through(field):
"Returns if the M2M class passed in has an autogenerated through table or not."
return (
# Django 1.0/1.1
(not field.rel.through)
or
# Django 1.2+
getattr(getattr(field.rel.through, "_meta", None), "auto_created", False)
)
def auto_model(model):
"Returns if the given model was automatically generated."
return getattr(model._meta, "auto_created", False)
def memoize(function):
"Standard memoization decorator."
name = function.__name__
_name = '_' + name
def method(self):
if not hasattr(self, _name):
value = function(self)
setattr(self, _name, value)
return getattr(self, _name)
def invalidate():
if hasattr(method, _name):
delattr(method, _name)
method.__name__ = function.__name__
method.__doc__ = function.__doc__
method._invalidate = invalidate
return method
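# Illustrative use of @memoize (hypothetical class and helper): the computed
# value is cached on the instance as "_heavy":
#
#   class Migrations(object):
#       @memoize
#       def heavy(self):
#           return expensive_scan()   # body runs at most once per instance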
| apache-2.0 |
lakshayg/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 96 | 8140 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
ds = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
ds.Normal,
ds.Bernoulli,
ds.Beta,
ds.Chi2,
ds.Exponential,
ds.Gamma,
ds.InverseGamma,
ds.Laplace,
ds.StudentT,
ds.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = ds.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = ds.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = ds.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = ds.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = ds.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = ds.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = ds.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(ds.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.test_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
      # We use as_list since TensorShape comparison does not work correctly for
      # unknown values, i.e., Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
if __name__ == "__main__":
test.main()
| apache-2.0 |
azlanismail/prismgames | examples/games/car/networkx/algorithms/isomorphism/vf2userfunc.py | 1 | 7715 | """
Module to simplify the specification of user-defined equality functions for
node and edge attributes during isomorphism checks.
During the construction of an isomorphism, the algorithm considers two
candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then
compared with respect to properties involving n1 and n2, and if the outcome
is good, then the candidate nodes are considered isomorphic. NetworkX
provides a simple mechanism for users to extend the comparisons to include
node and edge attributes.
Node attributes are handled by the node_match keyword. When considering
n1 and n2, the algorithm passes their node attribute dictionaries to
node_match, and if it returns False, then n1 and n2 cannot be
considered to be isomorphic.
Edge attributes are handled by the edge_match keyword. When considering
n1 and n2, the algorithm must verify that outgoing edges from n1 are
commensurate with the outgoing edges for n2. If the graph is directed,
then a similar check is also performed for incoming edges.
Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from
G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge
between (n1, v1) and only one edge between (n2, v2). Those edge attribute
dictionaries are passed to edge_match, and if it returns False, then
n1 and n2 cannot be considered isomorphic. For multigraphs and
multidigraphs, there can be multiple edges between (n1, v1) and also
multiple edges between (n2, v2). Now, there must exist an isomorphism
from "all the edges between (n1, v1)" to "all the edges between (n2, v2)".
So, all of the edge attribute dictionaries are passed to edge_match, and
it must determine if there is an isomorphism between the two sets of edges.
"""
import networkx as nx
from . import isomorphvf2 as vf2
__all__ = ['GraphMatcher',
'DiGraphMatcher',
'MultiGraphMatcher',
'MultiDiGraphMatcher',
]
def _semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible.
"""
# Make sure the nodes match
if self.node_match is not None:
nm = self.node_match(self.G1.node[G1_node], self.G2.node[G2_node])
if not nm:
return False
# Make sure the edges match
if self.edge_match is not None:
# Cached lookups
G1_adj = self.G1_adj
G2_adj = self.G2_adj
core_1 = self.core_1
edge_match = self.edge_match
for neighbor in G1_adj[G1_node]:
# G1_node is not in core_1, so we must handle R_self separately
if neighbor == G1_node:
if not edge_match(G1_adj[G1_node][G1_node],
G2_adj[G2_node][G2_node]):
return False
elif neighbor in core_1:
if not edge_match(G1_adj[G1_node][neighbor],
G2_adj[G2_node][core_1[neighbor]]):
return False
# syntactic check has already verified that neighbors are symmetric
return True
class GraphMatcher(vf2.GraphMatcher):
"""VF2 isomorphism checker for undirected graphs.
"""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2: graph
The graphs to be tested.
node_match: callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.node[n1], G2.node[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match: callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.GraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
semantic_feasibility = _semantic_feasibility
class DiGraphMatcher(vf2.DiGraphMatcher):
"""VF2 isomorphism checker for directed graphs.
"""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2 : graph
The graphs to be tested.
node_match : callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.node[n1], G2.node[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match : callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.DiGraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
# Test node_match and also test edge_match on successors
feasible = _semantic_feasibility(self, G1_node, G2_node)
if not feasible:
return False
# Test edge_match on predecessors
self.G1_adj = self.G1.pred
self.G2_adj = self.G2.pred
feasible = _semantic_feasibility(self, G1_node, G2_node)
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
return feasible
## The "semantics" of edge_match are different for multi(di)graphs, but
## the implementation is the same. So, technically we do not need to
## provide "multi" versions, but we do so to match NetworkX's base classes.
class MultiGraphMatcher(GraphMatcher):
"""VF2 isomorphism checker for undirected multigraphs. """
pass
class MultiDiGraphMatcher(DiGraphMatcher):
"""VF2 isomorphism checker for directed multigraphs. """
pass
| gpl-2.0 |
Donnerbart/hazelcast-simulator | dist/src/main/dist/bin/benchmark-report.py | 1 | 32024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# todo:
# - writing html
# - pruning of dstats to match running time
# - when comparing benchmarks; use 1 color for all plots from 1 benchmark
# - if no latency info is found; print warning
# - when there are not a lot of data points, there are timing issues in gnuplot (use WORKER_PERFORMANCE_MONITOR_INTERVAL_SECONDS=default)
# - timeseries: avg
# - gnuplot y axis formatting; long numbers are unreadable because not dots or comma's
# - throughput per member in main output directory
# - latency distribution doesn't show the percentiles; doesn't load xlabels.csv
#
# done:
# - better commandline help
# - throughput per worker in a single plot
# - default gnuplot colors stink; often they are not distinguishable
#
# backlog
# - google chart option
# - svg option
# - latency per worker
# - option to plot with real time.
# - dstats merging for members?
# - cpu usage merging needs to be divided by number of agents.
# - option not to make low part of graph shrink
# - option to show real time
import argparse
import csv
import os
import re
import tempfile
parser = argparse.ArgumentParser(description='Creating a benchmark report from one or more benchmarks.')
parser.add_argument('benchmarks', metavar='B', nargs='+',
help='a benchmark to be used in the comparison')
# parser.add_argument('-r', '--realtime', default='report', help='print the real time of the datapoints.')
parser.add_argument('-o', '--output', nargs=1,
help='The output directory for the report. By default a report directory in the working directory is created.')
x = parser.parse_args()
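# Illustrative invocation (hypothetical directories); a benchmark directory may
# optionally be followed by a [display-name] used in the report legends:
#   benchmark-report.py run1 [baseline] run2 [patched] -o comparison_report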
args = x.benchmarks
simulator_home = os.environ['SIMULATOR_HOME']
if not x.output:
report_dir = "report"
else:
report_dir = x.output[0]
print("output directory '" + report_dir + "'")
# ================ utils ========================
def dump(obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
def ensure_dir(file_path):
if not os.path.exists(file_path):
os.makedirs(file_path)
# ================ plotting =========================
class Gnuplot:
image_width = 1280
image_height = 1024
filepath = None
ylabel = None
is_bytes = None
def __init__(self, directory, title, basefilename=None):
self.tmp = tempfile.NamedTemporaryFile(delete=False)
self.title = title
self.directory = directory
self.ts_list = []
self.titles = {}
self.basefilename = basefilename
def _complete(self):
self.tmp.flush()
from os import system
system('gnuplot ' + self.tmp.name)
def _write(self, line):
self.tmp.write(line + '\n')
def add(self, ts, title=None):
self.ts_list.append(ts)
self.titles[ts] = title
return self
    # Returns a color for the time series. We use some hard-coded colors to make
    # sure the colors are predictable and clearly distinguishable. If there are
    # too many time series then we just rely on gnuplot's default mechanism.
def _color(self, ts):
if (len(self.ts_list)) > 8:
return None
# for list of colors: http://www.ss.scphys.kyoto-u.ac.jp/person/yonezawa/contents/program/gnuplot/colorname_list.html
if ts == self.ts_list[0]:
return "red"
elif ts == self.ts_list[1]:
return "blue"
elif ts == self.ts_list[2]:
return "forest-green"
elif ts == self.ts_list[3]:
return "gold"
elif ts == self.ts_list[4]:
return "grey"
elif ts == self.ts_list[5]:
return "brown"
elif ts == self.ts_list[6]:
return "violet"
else:
return "orchid"
def plot(self):
empty = True
for ts in self.ts_list:
if not ts.is_empty():
empty = False
break
if empty:
# print("Skipping plot of " + self.title + "; timeseries are empty")
return
ts_first = self.ts_list[0]
self.ylabel = ts_first.ylabel
if self.basefilename:
self.filepath = os.path.join(self.directory, self.basefilename + ".png")
else:
self.filepath = os.path.join(self.directory, ts_first.name + ".png")
self.is_bytes = ts_first.is_bytes
ensure_dir(self.directory)
self._plot()
print(self.filepath)
def _plot(self):
raise NotImplementedError("Please Implement this method")
class TimeseriesGnuplot(Gnuplot):
def __init__(self, directory, title, basefilename=None):
Gnuplot.__init__(self, directory, title, basefilename)
def _plot(self):
# self._write("unset autoscale y")
self._write("set title '" + self.title + "' noenhanced")
self._write("set style data lines")
self._write('set datafile separator ","')
self._write("set terminal png size " + str(self.image_width) + "," + str(self.image_height))
self._write("set grid")
self._write("set key below")
self._write("set xdata time")
self._write("set timefmt \"%s\"")
self._write("offset = 0")
self._write("t0(x)=(offset=($0==0) ? x : offset, x - offset)")
self._write("set xlabel 'Time minutes:seconds'")
self._write("set ylabel '" + self.ylabel + "'")
if self.is_bytes:
# the problem here is that this is 1000 based; not 1024
self._write("set format y '%.1s%cB'")
# else:
# self._write("set format y '%.0f'")
self._write("set output '" + self.filepath + "'")
self._write("plot \\")
tmp_files = []
for ts in self.ts_list:
ts_file = ts.to_tmp_file()
tmp_files.append(ts_file)
if len(self.ts_list) > 1:
title = self.titles[ts]
if not title:
title = ts.name
title_str = "title \"" + title + "\" noenhanced"
else:
title_str = "title \"\""
color = self._color(ts)
lt = ""
if color:
lt = "lt rgb \"" + color + "\""
self._write(" \'" + ts_file.name + "\' using (t0(timecolumn(1))):2 " + title_str + " " + lt + ", \\")
self._complete()
for tmp_file in tmp_files:
tmp_file.close()
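# Illustrative (hypothetical series objects): plots are assembled by adding
# series and then rendering once:
#   g = TimeseriesGnuplot("report/throughput", "Throughput")
#   g.add(series_a, title="run1"); g.add(series_b, title="run2")
#   g.plot()   # writes report/throughput/<name>.png via gnuplot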
class LatencyDistributionGnuplot(Gnuplot):
def __init__(self, directory, title):
Gnuplot.__init__(self, directory, title)
def _plot(self):
self._write("set datafile separator \",\"")
self._write("set title '" + self.title + "' noenhanced")
self._write("set terminal png size " + str(self.image_width) + "," + str(self.image_height))
self._write("set grid")
self._write("unset xtics")
self._write("set ylabel 'Latency (μs)'")
self._write("set logscale x")
self._write('set key top left')
self._write("set style line 1 lt 1 lw 3 pt 3 linecolor rgb \"red\"")
self._write("set output '" + self.filepath + "'")
self._write("plot '"+simulator_home+"/bin/xlabels.csv' notitle with labels center offset 0, 1.5 point,\\")
tmp_files = []
for ts in self.ts_list:
ts_file = ts.to_tmp_file()
tmp_files.append(ts_file)
if len(self.ts_list) > 1:
title = self.titles[ts]
if not title:
title = ts.name
title_str = "title \"" + title + "\" noenhanced"
else:
title_str = "title \"\""
color = self._color(ts)
lt = ""
if color:
lt = "lt rgb \"" + color + "\""
self._write(" \"" + ts_file.name + "\" using 1:2 " + title_str + " " + lt + " with lines, \\")
self._complete()
for tmp_file in tmp_files:
tmp_file.close()
print(self.tmp.name)
class GoogleCharts:
def __init__(self, ts, directory, title):
self.title = title
self.ts = ts
self.directory = directory
with open('chart_template.html', 'r') as f:
self.chart_template = f.read()
def plot(self):
filepath = os.path.join(self.directory, self.ts.name + ".html")
        if self.ts.is_empty():
            print("Skipping plot of " + filepath + "; timeseries is empty")
            return
rows = ""
first = True
for item in self.ts.items:
rows += "[" + str(item.time) + "," + str(item.value) + "]"
if first:
rows += ","
rows += "\n"
chart = self.chart_template.replace("$rows", rows)
ensure_dir(self.directory)
with open(filepath, 'w') as f:
f.write(chart)
print filepath
# a series is effectively a list of key/values. It could be a time series where the key is the time and the value
# is the measured value e.g. cpu usage.
class Series:
name = None
    def __init__(self, name, ylabel, is_bytes, ts_list=None, items=None):
if ts_list is None:
ts_list = []
self.is_bytes = is_bytes
self.name = name
self.ylabel = ylabel
if not items:
self.items = []
else:
self.items = items
self.attributes = {}
for source_ts in ts_list:
if source_ts.is_empty():
continue
# add all items in the source_ts, to the result_ts
for index in range(0, source_ts.length()):
source_item = source_ts.items[index]
if self.length() > index:
result_item = self.items[index]
result_item.value += source_item.value
else:
self.add(source_item.time, source_item.value)
def add(self, time, value):
self.items.append(KeyValue(time, value))
def start_time(self):
if not self.items:
return None
else:
return self.items[0].time
def end_time(self):
if not self.items:
return None
else:
return self.items[len(self.items) - 1].time
def to_tmp_file(self):
temp = tempfile.NamedTemporaryFile(delete=False)
for item in self.items:
temp.write(str(item.time) + ',' + str(item.value) + '\n')
temp.close()
return temp
def length(self):
return len(self.items)
def is_empty(self):
return self.length() == 0
def min(self):
result = None
for item in self.items:
if not result or item.value < result:
result = item.value
return result
def max(self):
result = None
for item in self.items:
if not result or item.value > result:
result = item.value
return result
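# Illustrative (hypothetical values): merging through ts_list sums series
# pairwise by index, which is how per-worker throughput gets aggregated:
#   s1 = Series("tp", "ops/s", False); s1.add(0, 10.0); s1.add(1, 20.0)
#   total = Series("tp", "ops/s", False, ts_list=[s1, s1])  # values: 20.0, 40.0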
# A key/value in a series
class KeyValue:
time = None
value = None
def __init__(self, time, value):
self.time = time
self.value = float(value)
# A handle to a series. With a handle you can refer to a series, without needing to pull it into memory. Since we could have
# a lot of measured data, we want to prevent getting it all in memory.
class SeriesHandle:
def __init__(self, src, name, title, ylabel, load_method, args=None, is_bytes=False):
if not args:
args = []
self.src = src
self.name = name
self.title = title
self.ylabel = ylabel
self.load_method = load_method
self.args = args
self.is_bytes = is_bytes
def load(self):
items = self.load_method(*self.args)
return Series(self.name, self.ylabel, self.is_bytes, items=items)
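# Illustrative (hypothetical load_fn): a handle keeps measured data out of
# memory until plot time -- the loader only runs when .load() is called:
#   h = SeriesHandle("dstat", "cpu_user", "CPU User", "CPU User %", load_fn)
#   series = h.load()   # load_fn() runs here; its KeyValue list becomes a Series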
class Worker:
name = ""
directory = ""
performance_csv = None
def __init__(self, name, directory):
self.name = name
self.directory = directory
refs = []
self.ts_references = refs
refs.append(SeriesHandle("throughput", "throughput_" + name, "Throughput", "Operations/second",
self.__load_throughput))
refs.append(SeriesHandle("dstat", "memory_used", "Memory Used", "Memory used",
self.__load_dstat, args=[1], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_buffered", "Memory Buffered", "Memory Buffered",
self.__load_dstat, args=[2], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_cached", "Memory Cached", "Memory Cached",
self.__load_dstat, args=[3], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_free", "Memory Free", "Memory Free",
self.__load_dstat, args=[4], is_bytes=True))
refs.append(SeriesHandle("dstat", "cpu_user", "CPU User", "CPU User %",
self.__load_dstat, args=[5]))
refs.append(SeriesHandle("dstat", "cpu_system", "CPU System", "CPU System %",
self.__load_dstat, args=[6]))
refs.append(SeriesHandle("dstat", "cpu_idle", "CPU Idle", "CPU Idle %",
self.__load_dstat, args=[7]))
refs.append(SeriesHandle("dstat", "cpu_wait", "CPU Wait", "CPU Wait %",
self.__load_dstat, args=[8]))
refs.append(SeriesHandle("dstat", "cpu_total", "CPU Total", "CPU Total %",
self.__load_dstat_cpu_total_ts))
refs.append(SeriesHandle("dstat", "cpu_hardware_interrupts", "CPU Hardware Interrupts", "CPU Hardware Interrupts/sec",
self.__load_dstat, args=[9]))
refs.append(SeriesHandle("dstat", "cpu_software_interrupts", "CPU Software Interrupts", "CPU Software Interrupts/sec",
self.__load_dstat, args=[10]))
refs.append(SeriesHandle("dstat", "disk_read", "Disk Reads", "Disk Reads/sec",
self.__load_dstat, args=[11], is_bytes=True))
refs.append(SeriesHandle("dstat", "disk_write", "Disk Writes", "Disk writes/sec",
self.__load_dstat, args=[12], is_bytes=True))
refs.append(SeriesHandle("dstat", "net_receive", "Net Receive", "Receiving/second",
self.__load_dstat, args=[13], is_bytes=True))
refs.append(SeriesHandle("dstat", "net_send", "Net Send", "Sending/second",
self.__load_dstat, args=[14], is_bytes=True))
refs.append(SeriesHandle("dstat", "page_in", "Page in", "todo",
self.__load_dstat, args=[15]))
refs.append(SeriesHandle("dstat", "page_out", "Page out", "todo",
self.__load_dstat, args=[16]))
refs.append(SeriesHandle("dstat", "system_interrupts", "System Interrupts", "System Interrupts/sec",
self.__load_dstat, args=[17]))
refs.append(SeriesHandle("dstat", "system_context_switches", "System Context Switches", "System Context Switches/sec",
self.__load_dstat, args=[18]))
refs.append(SeriesHandle("dstat", "load_average_1m", "Load Average 1 Minute", "Load",
self.__load_dstat, args=[19]))
refs.append(SeriesHandle("dstat", "load_average_5m", "Load Average 5 Minutes", "Load",
self.__load_dstat, args=[20]))
refs.append(SeriesHandle("dstat", "load_average_15m", "Load Average 15 Minute", "Load",
self.__load_dstat, args=[21]))
refs.append(SeriesHandle("gc", "pause_time", "Pause time", "seconds",
self.__load_gc, args=[1, True]))
refs.append(SeriesHandle("gc", "young_size_before_gc", "Young size before gc", "Size",
self.__load_gc, args=[5, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_size_after_gc", "Young size after gc", "Size",
self.__load_gc, args=[6, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_size_max", "Young size max", "Size",
self.__load_gc, args=[7, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_collected", "Young collected", "Collected",
self.__load_gc, args=[8, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_collected_rate", "Young collection rate", "Collected/second",
self.__load_gc, args=[9, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_allocated", "Young allocated", "Allocation",
self.__load_gc, args=[10, True], is_bytes=True))
refs.append(SeriesHandle("gc", "allocation_rate", "Allocation rate", "Allocated/second",
self.__load_gc, args=[11, True], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_before_gc", "Heap size before gc", "Size",
self.__load_gc, args=[12, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_after_gc", "Heap size after gc", "Size",
self.__load_gc, args=[13, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_max", "Heap size max", "Size",
self.__load_gc, args=[14, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_collected", "Heap collected", "Size",
self.__load_gc, args=[15, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_collected_rate", "Heap collected rate", "Collected/second",
self.__load_gc, args=[16, False], is_bytes=True))
refs.append(SeriesHandle("gc", "promotion", "Promoted", "Size",
self.__load_gc, args=[17, False], is_bytes=True))
refs.append(SeriesHandle("gc", "promotion_rate", "Promotion rate", "Promoted/second",
self.__load_gc, args=[18, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_size_before_gc", "Tenured size before gc", "Size",
self.__load_gc, args=[19, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_size_after_gc", "Tenured size after gc", "Size",
self.__load_gc, args=[20, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_total", "Tenured size total", "Size",
self.__load_gc, args=[21, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_size_before_gc", "Meta/Perm size before gc", "Size",
self.__load_gc, args=[22, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_size_after_gc", "Meta/Perm size after gc", "Size",
self.__load_gc, args=[23, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_total", "Meta/Perm size total", "Size",
self.__load_gc, args=[24, True], is_bytes=True))
# Returns the name of the agent this worker belongs to
def agent(self):
index = self.name.index("_", 3)
return self.name[0:index]
    def is_driver(self):
        return os.path.exists(os.path.join(self.directory, "performance.csv"))
def __load_throughput(self):
performance_csv = os.path.join(self.directory, "performance.csv")
result = []
if os.path.exists(performance_csv):
with open(performance_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
# skip the first line
next(csvreader)
for row in csvreader:
result.append(KeyValue(row[0], row[4]))
return result
def __load_dstat(self, column):
dstat_csv = os.path.join(self.directory, "dstat.csv")
result = []
if os.path.exists(dstat_csv):
with open(dstat_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
                # we need to skip the first 8 header lines
for x in range(0, 8):
next(csvreader)
for row in csvreader:
if column < len(row): # protection if column doesn't exist
result.append(KeyValue(row[0], row[column]))
return result
def __load_gc(self, column, filter_minus_one):
gc_csv = os.path.join(self.directory, "gc.csv")
result = []
if os.path.exists(gc_csv):
with open(gc_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
# we need to skip the first line
next(csvreader)
for row in csvreader:
key = row[0]
value = row[column]
if value != "-1" or not filter_minus_one:
result.append(KeyValue(key, value))
return result
# total cpu usage isn't explicitly provided by dstat, so we just sum the user+system
def __load_dstat_cpu_total_ts(self):
dstat_csv = os.path.join(self.directory, "dstat.csv")
result = []
if os.path.exists(dstat_csv):
with open(dstat_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
                # we need to skip the first 8 header lines
for x in range(0, 8):
next(csvreader)
for row in csvreader:
if len(row) > 6: # protection if column doesn't exist
result.append(KeyValue(row[0], float(row[5]) + float(row[6])))
return result
class Benchmark:
# the directory where the original files can be found
src_dir = ""
workers = None
name = ""
def __init__(self, src_dir, name):
self.src_dir = src_dir
self.name = name
# load all workers
self.workers = []
for subdir_name in os.listdir(src_dir):
subdir = os.path.join(src_dir, subdir_name)
if not os.path.isdir(subdir):
continue
if not subdir_name.startswith("C_A"):
continue
self.workers.append(Worker(subdir_name, subdir))
# making sure there are workers; otherwise it is an invalid benchmark
if len(self.workers) == 0:
print("Invalid Benchmark " + self.name + " from directory [" + self.src_dir + "]; no workers found")
exit(1)
# look for all latency info
refs = []
self.ts_references = refs
refs.append(SeriesHandle("throughput", "throughput", "Throughput", "Operations/sec", self.aggregated_throughput))
for file_name in os.listdir(self.src_dir):
if not file_name.endswith(".hgrm"):
continue
file_name = os.path.splitext(file_name)[0]
file_path = os.path.join(self.src_dir, file_name)
print(file_path)
name = file_name.split('-')[1]
refs.append(SeriesHandle("latency", "latency_interval_25_" + name, "Interval 25%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 3]))
refs.append(SeriesHandle("latency", "latency_interval_50_" + name, "Interval 50%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 4]))
refs.append(SeriesHandle("latency", "latency_interval_75_" + name, "Interval 75%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 5]))
refs.append(SeriesHandle("latency", "latency_interval_90_" + name, "Interval 90%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 6]))
refs.append(SeriesHandle("latency", "latency_interval_99_" + name, "Interval 99%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 7]))
refs.append(SeriesHandle("latency", "latency_interval_999_" + name, "Interval 99.9%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 8]))
refs.append(SeriesHandle("latency", "latency_interval_9999_" + name, "Interval 99.99%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 9]))
refs.append(SeriesHandle("latency", "latency_interval_99999_" + name, "Interval 99.999%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 10]))
refs.append(SeriesHandle("latency", "latency_interval_min_" + name, "Interval Min", "Latency (μs)",
self.load_latency_ts, args=[file_path, 11]))
refs.append(SeriesHandle("latency", "latency_interval_max_" + name, "Interval Max", "Latency (μs)",
self.load_latency_ts, args=[file_path, 12]))
refs.append(SeriesHandle("latency", "latency_interval_mean_" + name, "Interval Mean", "Latency (μs)",
self.load_latency_ts, args=[file_path, 13]))
refs.append(
SeriesHandle("latency", "latency_interval_std_deviation_" + name, "Interval Standard Deviation", "Latency (μs)",
self.load_latency_ts, args=[file_path, 14]))
hgrm_path = os.path.join(src_dir, file_name + ".hgrm")
refs.append(
SeriesHandle("latency-distribution", "latency_distribution_" + name, "Latency distribution", "Latency (μs)",
self.load_latency_distribution_ts, args=[hgrm_path]))
agents = {}
for worker in self.workers:
agent = worker.agent()
if not agents.get(agent):
agents[agent] = worker
for agent, worker in agents.iteritems():
for ref in worker.ts_references:
if ref.src == "dstat":
refs.append(SeriesHandle("dstat", ref.name + "_" + agent, ref.title, ref.ylabel, self.x, args=[ref],
is_bytes=ref.is_bytes))
    def x(self, ref):
        # Load the wrapped series handle and return its raw items; used by the
        # per-agent dstat handles built above.
        return ref.load().items
def aggregated_throughput(self):
list = []
for worker in self.workers:
for ref in worker.ts_references:
if ref.src == "throughput":
list.append(ref.load())
return Series("", "", False, ts_list=list).items
def load_latency_ts(self, path, column):
result = []
with open(path, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
            # we need to skip the first 3 header lines
for x in range(0, 3):
next(csvreader)
for row in csvreader:
result.append(KeyValue(row[0], row[column]))
return result
def load_latency_distribution_ts(self, path):
result = []
line_index = 0
with open(path) as f:
for line in f:
line = line.rstrip()
line_index += 1
if line_index < 4 or line.startswith("#"):
continue
row = re.split(" +", line)
if len(row) < 5:
continue
result.append(KeyValue(row[4], row[1]))
print path
return result
class Comparison:
def __init__(self):
benchmark_dirs = []
benchmark_names = {}
last_benchmark = None
print("Loading benchmarks")
# collect all benchmark directories and the names for the benchmarks
for arg in args:
if arg.startswith("[") and arg.endswith("]"):
if not last_benchmark:
print("Benchmark name " + arg + " must be preceded with a benchmark directory.")
exit()
benchmark_names[last_benchmark] = arg[1:len(arg) - 1]
last_benchmark = None
else:
benchmark_dir = arg
if not os.path.exists(benchmark_dir):
print("benchmark directory '" + benchmark_dir + "' does not exist!")
exit(1)
last_benchmark = arg
benchmark_dirs.append(benchmark_dir)
name = os.path.basename(os.path.normpath(benchmark_dir))
benchmark_names[benchmark_dir] = name
# Make the benchmarks
self.benchmarks = []
for benchmark_dir in benchmark_dirs:
self.benchmarks.append(Benchmark(benchmark_dir, benchmark_names[benchmark_dir]))
def output_dir(self, name):
output_dir = os.path.join(report_dir, name)
ensure_dir(output_dir)
return output_dir
def compare(self):
plots = {}
for benchmark in self.benchmarks:
if len(benchmark.ts_references) == 0:
print(" benchmark [" + benchmark.name + "] benchmark.dir [" + benchmark.src_dir + "] has no data")
exit(1)
for ref in benchmark.ts_references:
plot = plots.get(ref.name)
if not plot:
if ref.src == "latency-distribution":
plot = LatencyDistributionGnuplot(self.output_dir("latency"), ref.title)
else:
plot = TimeseriesGnuplot(self.output_dir(ref.src), ref.title)
plots[ref.name] = plot
plot.add(ref.load(), title=benchmark.name)
for benchmark in self.benchmarks:
for worker in benchmark.workers:
for ref in worker.ts_references:
if ref.src == "throughput":
plot = plots.get("throughput_per_worker")
if not plot:
plot = TimeseriesGnuplot(self.output_dir(ref.src),
"Throughput per member",
basefilename="throughput_per_worker")
plots["throughput_per_worker"] = plot
if len(self.benchmarks) > 1:
plot.add(ref.load(), benchmark.name + "_" + worker.name)
else:
plot.add(ref.load(), worker.name)
# make all plots for each individual worker
for benchmark in self.benchmarks:
for worker in benchmark.workers:
for ref in worker.ts_references:
if ref.src == "dstat":
continue # dstat is already plotted
name = ref.name+"_"+worker.name
plot = plots.get(name)
if not plot:
plot = TimeseriesGnuplot(self.output_dir(ref.src), worker.name + " " + ref.title, basefilename=name)
plots[name] = plot
plot.add(ref.load(), benchmark.name)
for plot in plots.values():
plot.plot()
print("Done writing report [" + report_dir + "]")
for benchmark in self.benchmarks:
print(" benchmark [" + benchmark.name + "] benchmark.dir [" + benchmark.src_dir + "]")
comparison = Comparison()
comparison.compare()
| apache-2.0 |
smsolivier/VEF | code/hlimit.py | 1 | 2247 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import ld as LD
import dd as DD
from hidespines import *
import sys
''' compares difference between Sn and moment equations as cell width --> 0 '''
if (len(sys.argv) > 1):
outfile = sys.argv[1]
else:
outfile = None
def getDiff(sol, tol=1e-6):
diff = np.zeros(len(sol))
for i in range(len(sol)):
x, phi, it = sol[i].sourceIteration(tol)
diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)/np.linalg.norm(sol[i].phi_SN, 2)
return diff
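# getDiff above reports, for each solver, the relative L2 difference
# ||phi - phi_SN||_2 / ||phi_SN||_2 between the computed flux and that
# solver's stored SN flux.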
N = 100
n = 8
xb = 1
Sigmaa = lambda x: .1
Sigmat = lambda x: 1
q = lambda x, mu: 1
tol = 1e-10
N = np.logspace(1, 3, 5)
N = np.array([int(x) for x in N])
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=0) for x in N]
ed11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
diff00 = getDiff(ed00, tol)
diff01 = getDiff(ed01, tol)
diff10 = getDiff(ed10, tol)
diff11 = getDiff(ed11, tol)
diff20 = getDiff(ed20, tol)
diff21 = getDiff(ed21, tol)
fontsize=16
plt.loglog(xb/N, diff00, '-o', clip_on=False, label='MHFEM Edges, No Gauss')
plt.loglog(xb/N, diff01, '-o', clip_on=False, label='Maintain Slopes, No Gauss')
plt.loglog(xb/N, diff10, '-o', clip_on=False, label='MHFEM Edges, Gauss')
plt.loglog(xb/N, diff11, '-o', clip_on=False, label='Maintain Slopes, Gauss')
plt.loglog(xb/N, diff20, '-o', clip_on=False, label='vanLeer, No Gauss')
plt.loglog(xb/N, diff21, '-o', clip_on=False, label='vanLeer, Gauss')
plt.xlabel(r'$h$', fontsize=fontsize)
plt.ylabel('SN/MHFEM Convergence', fontsize=fontsize)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile, transparent=True)
else:
plt.show()
| mit |
instana/python-sensor | instana/instrumentation/urllib3.py | 1 | 3690 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2017
from __future__ import absolute_import
import opentracing
import opentracing.ext.tags as ext
import wrapt
from ..log import logger
from ..singletons import agent
from ..util.traceutils import get_active_tracer
from ..util.secrets import strip_secrets_from_query
try:
import urllib3
def collect(instance, args, kwargs):
""" Build and return a fully qualified URL for this request """
kvs = dict()
try:
kvs['host'] = instance.host
kvs['port'] = instance.port
if args is not None and len(args) == 2:
kvs['method'] = args[0]
kvs['path'] = args[1]
else:
kvs['method'] = kwargs.get('method')
kvs['path'] = kwargs.get('path')
if kvs['path'] is None:
kvs['path'] = kwargs.get('url')
# Strip any secrets from potential query params
if kvs.get('path') is not None and ('?' in kvs['path']):
parts = kvs['path'].split('?')
kvs['path'] = parts[0]
if len(parts) == 2:
kvs['query'] = strip_secrets_from_query(parts[1], agent.options.secrets_matcher, agent.options.secrets_list)
if type(instance) is urllib3.connectionpool.HTTPSConnectionPool:
kvs['url'] = 'https://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
else:
kvs['url'] = 'http://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
except Exception:
logger.debug("urllib3 collect error", exc_info=True)
return kvs
else:
return kvs
def collect_response(scope, response):
try:
scope.span.set_tag(ext.HTTP_STATUS_CODE, response.status)
if agent.options.extra_http_headers is not None:
for custom_header in agent.options.extra_http_headers:
if custom_header in response.headers:
scope.span.set_tag("http.header.%s" % custom_header, response.headers[custom_header])
if 500 <= response.status <= 599:
scope.span.mark_as_errored()
except Exception:
logger.debug("collect_response", exc_info=True)
@wrapt.patch_function_wrapper('urllib3', 'HTTPConnectionPool.urlopen')
def urlopen_with_instana(wrapped, instance, args, kwargs):
active_tracer = get_active_tracer()
        # If we're not tracing, just return; boto3 has its own visibility
if active_tracer is None or active_tracer.active_span.operation_name == 'boto3':
return wrapped(*args, **kwargs)
with active_tracer.start_active_span("urllib3", child_of=active_tracer.active_span) as scope:
try:
kvs = collect(instance, args, kwargs)
if 'url' in kvs:
scope.span.set_tag(ext.HTTP_URL, kvs['url'])
if 'query' in kvs:
scope.span.set_tag("http.params", kvs['query'])
if 'method' in kvs:
scope.span.set_tag(ext.HTTP_METHOD, kvs['method'])
if 'headers' in kwargs:
active_tracer.inject(scope.span.context, opentracing.Format.HTTP_HEADERS, kwargs['headers'])
response = wrapped(*args, **kwargs)
collect_response(scope, response)
return response
except Exception as e:
scope.span.mark_as_errored({'message': e})
raise
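    # Illustrative effect (assuming an active Instana tracer): any client built
    # on urllib3 now produces an exit span automatically, e.g.
    #   import requests                      # requests uses urllib3 internally
    #   requests.get("http://example.com/api")
    #   # -> an "urllib3" span tagged with http.url, http.method and the status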
logger.debug("Instrumenting urllib3")
except ImportError:
pass
| mit |
pulsar-chem/Pulsar-Core | test/system/TestBasisSet.py | 1 | 2740 | import pulsar as psr
def run_test():
tester = psr.PyTester("Testing the BasisSet and BasisSetShell")
cGTO = psr.ShellType.CartesianGaussian
sGTO = psr.ShellType.SphericalGaussian
alpha=[3.42525091, 0.62391373, 0.16885540]
c=[0.15432897, 0.53532814, 0.44463454]
FakeD=psr.BasisShellInfo(cGTO,2,3,1,alpha,c)
FakeD2=psr.BasisShellInfo(sGTO,2,3,1,alpha,c)
carts=[0.0,0.0,0.0]
H=psr.create_atom(carts,1)
BI=psr.BasisInfo()
BI.shells=[FakeD,FakeD2]
H.basis_sets={"PRIMARY" :BI }
GhH=psr.make_ghost_atom(H)
Atoms=psr.AtomSetUniverse([H,GhH])
Mol=psr.System(Atoms,True)
BS=Mol.get_basis_set("PRIMARY")
BS2=psr.BasisSet(BS)
tester.test_equal("Copy constructors work",BS,BS2)
BS3=psr.BasisSet(1,3,3,3)
tester.test_return("Inequality works",True,True,BS3.__ne__,BS2)
tester.test_return("Get types works",True,{cGTO,sGTO},BS.get_types)
tester.test_return("Get n shells",True,4,BS2.n_shell)
tester.test_return("Get n unique shells",True,2,BS.n_unique_shell)
tester.test_return("Get n primitives",True,12,BS2.n_primitives)
tester.test_return("Get n coeficients",True,12,BS2.n_coefficients)
tester.test_return("Get number of functions",True,22,BS.n_functions)
tester.test_return("Maximum number of primitivs",True,3,BS2.max_n_primitives)
tester.test_return("Max angular momentum",True,2,BS2.max_am)
tester.test_return("All angular momentum",True,{2},BS.all_am)
tester.test_return("Max n functions in a shell",True,6,BS2.max_n_functions)
tester.test_return("Shell start",True,6,BS2.shell_start,1)
tester.test_call("Invalid shell start",False,BS2.shell_start,99)
Si,Sj=BS.shell(3),BS.shell(2)
tester.test_return("Shell has right coordinates",True,carts,Si.get_coords)
tester.test_return("Shell has right coordinate",True,carts[1],Si.get_coord,1)
tester.test_call("Get invalid shell",False,BS.shell,99)
tester.test_return("Get unique shell",True,Si,BS.unique_shell,1)
tester.test_call("Get invalid unique shell",False,BS.unique_shell,99)
i=0
for Sk in BS2:
tester.test_equal("Iterator "+str(i),Sk,Si if i%2==1 else Sj)
i=i+1
tester.test_return("Get valid shell info",True,FakeD,BS2.shell_info,0)
tester.test_call("Get invalid shell info",False,FakeD,BS.shell_info,99)
BS4=psr.BasisSet(1,3,3,3)
tester.test_return("Add shell that fits",True,None,BS4.add_shell,FakeD,carts)
BS3.add_shell(FakeD,carts)
tester.test_return("Shrink to fit",True,BS3,BS4.shrink_fit)
tester.test_call("Add shell no fit",False,BS4.add_shell,FakeD2,carts)
tester.test_return("Hash BS",True,BS.my_hash(),BS2.my_hash)
tester.print_results()
return tester.nfailed()
| bsd-3-clause |
jimberlage/servo | components/script/dom/bindings/codegen/parser/tests/test_attr_sequence_type.py | 276 | 1626 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface AttrSequenceType {
attribute sequence<object> foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Attribute type must not be a sequence type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithSequenceType {
attribute (sequence<object> or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a sequence member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrNullableUnionWithSequenceType {
attribute (sequence<object>? or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a nullable sequence "
"member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithUnionWithSequenceType {
attribute ((sequence<object> or DOMString) or AttrUnionWithUnionWithSequenceType) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union type with a union member "
"type that has a sequence member type")
| mpl-2.0 |
WillianPaiva/1flow | oneflow/core/migrations/0097_auto__add_field_twitterfeed_backfill_completed.py | 2 | 54712 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TwitterFeed.backfill_completed'
db.add_column(u'core_twitterfeed', 'backfill_completed',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TwitterFeed.backfill_completed'
db.delete_column(u'core_twitterfeed', 'backfill_completed')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '1e102a7faa5a4d499ad1ac93b04bf0fa'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'5e39177b7d51400ab75eda55d353bae8'", 'max_length': '32', 'blank': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'raw_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'raw_email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'f5031ed8ca344beb96c4544be478c632'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
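    # Note: the large dict above is South's standard "frozen ORM" snapshot --
    # each 'app.modelname' key maps field names to a triple of
    # (field class path, positional args, keyword args) as captured when the
    # migration was created.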
complete_apps = ['core'] | agpl-3.0 |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/find_files.py | 181 | 3872 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is used to find files used by run-webkit-tests and
perftestrunner. It exposes one public function - find() - which takes
an optional list of paths, optional set of skipped directories and optional
filter callback.
If a list is passed in, the returned list of files is constrained to those
found under the paths passed in. i.e. calling find(["LayoutTests/fast"])
will only return files under that directory.
If a set of skipped directories is passed in, the function will filter out
the files lying in these directories i.e. find(["LayoutTests"], set(["fast"]))
will return everything except files in fast subfolder.
If a callback is passed in, it will be called for the each file and the file
will be included into the result if the callback returns True.
The callback has to take three arguments: filesystem, dirname and filename."""

import itertools


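# Illustrative usage sketch (not part of the original module; 'fs' is assumed
# to be a webkitpy FileSystem object providing glob(), normpath(), join() and
# files_under(), and the base_dir path is hypothetical):
#
#   fs = FileSystem()
#   tests = find(fs, '/path/to/LayoutTests',
#                paths=['fast/css'],
#                skipped_directories=set(['.svn', '_svn']),
#                file_filter=lambda fs, dirname, name: name.endswith('.html'))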
def find(filesystem, base_dir, paths=None, skipped_directories=None, file_filter=None, directory_sort_key=None):
"""Finds the set of tests under a given list of sub-paths.
Args:
paths: a list of path expressions relative to base_dir
to search. Glob patterns are ok, as are path expressions with
forward slashes on Windows. If paths is empty, we look at
everything under the base_dir.
"""
paths = paths or ['*']
skipped_directories = skipped_directories or set(['.svn', '_svn'])
return _normalized_find(filesystem, _normalize(filesystem, base_dir, paths), skipped_directories, file_filter, directory_sort_key)


def _normalize(filesystem, base_dir, paths):
return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]


def _normalized_find(filesystem, paths, skipped_directories, file_filter, directory_sort_key):
"""Finds the set of tests under the list of paths.
Args:
paths: a list of absolute path expressions to search.
Glob patterns are ok.
"""
paths_to_walk = itertools.chain(*(filesystem.glob(path) for path in paths))
def sort_by_directory_key(files_list):
if directory_sort_key:
files_list.sort(key=directory_sort_key)
return files_list
all_files = itertools.chain(*(sort_by_directory_key(filesystem.files_under(path, skipped_directories, file_filter)) for path in paths_to_walk))
return all_files
| bsd-3-clause |
Yannig/ansible | test/units/module_utils/facts/test_facts.py | 80 | 22585 | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pytest
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import facts
from ansible.module_utils.facts import hardware
from ansible.module_utils.facts import network
from ansible.module_utils.facts import virtual
class BaseTestFactsPlatform(unittest.TestCase):
    """Verify that the automagic in Hardware.__new__ selects the right subclass."""

    platform_id = 'Generic'
    fact_class = hardware.base.Hardware
    collector_class = None
@patch('platform.system')
def test_new(self, mock_platform):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
mock_platform.return_value = self.platform_id
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_subclass(self):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
        # 'Generic' will try to map to platform.system(), which we are not mocking here
if self.platform_id == 'Generic':
return
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_collector(self):
if not self.collector_class:
pytest.skip('This test class needs to be updated to specify collector_class')
inst = self.collector_class()
self.assertIsInstance(inst, self.collector_class)
self.assertEqual(inst._platform, self.platform_id)
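

# Rough sketch (an assumption for illustration, not Ansible's actual code) of
# the __new__ "automagic" the docstring above refers to: the base class
# returns an instance of whichever subclass declares a 'platform' attribute
# matching platform.system().
#
#   import platform
#
#   class Hardware(object):
#       platform = 'Generic'
#
#       def __new__(cls, *args, **kwargs):
#           subclass = cls
#           for sc in cls.__subclasses__():
#               if sc.platform == platform.system():
#                   subclass = sc
#           return super(Hardware, cls).__new__(subclass)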
class TestLinuxFactsPlatform(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = hardware.linux.LinuxHardware
collector_class = hardware.linux.LinuxHardwareCollector
class TestHurdFactsPlatform(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = hardware.hurd.HurdHardware
collector_class = hardware.hurd.HurdHardwareCollector
class TestSunOSHardware(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = hardware.sunos.SunOSHardware
collector_class = hardware.sunos.SunOSHardwareCollector
class TestOpenBSDHardware(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = hardware.openbsd.OpenBSDHardware
collector_class = hardware.openbsd.OpenBSDHardwareCollector
class TestFreeBSDHardware(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = hardware.freebsd.FreeBSDHardware
collector_class = hardware.freebsd.FreeBSDHardwareCollector
class TestDragonFlyHardware(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = None
collector_class = hardware.dragonfly.DragonFlyHardwareCollector
class TestNetBSDHardware(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = hardware.netbsd.NetBSDHardware
collector_class = hardware.netbsd.NetBSDHardwareCollector
class TestAIXHardware(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = hardware.aix.AIXHardware
collector_class = hardware.aix.AIXHardwareCollector
class TestHPUXHardware(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = hardware.hpux.HPUXHardware
collector_class = hardware.hpux.HPUXHardwareCollector
class TestDarwinHardware(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = hardware.darwin.DarwinHardware
collector_class = hardware.darwin.DarwinHardwareCollector
class TestGenericNetwork(BaseTestFactsPlatform):
platform_id = 'Generic'
fact_class = network.base.Network
class TestHurdPfinetNetwork(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = network.hurd.HurdPfinetNetwork
collector_class = network.hurd.HurdNetworkCollector
class TestLinuxNetwork(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = network.linux.LinuxNetwork
collector_class = network.linux.LinuxNetworkCollector
class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
platform_id = 'Generic_BSD_Ifconfig'
fact_class = network.generic_bsd.GenericBsdIfconfigNetwork
collector_class = None
class TestHPUXNetwork(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = network.hpux.HPUXNetwork
collector_class = network.hpux.HPUXNetworkCollector
class TestDarwinNetwork(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = network.darwin.DarwinNetwork
collector_class = network.darwin.DarwinNetworkCollector
class TestFreeBSDNetwork(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = network.freebsd.FreeBSDNetwork
collector_class = network.freebsd.FreeBSDNetworkCollector
class TestDragonFlyNetwork(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = network.dragonfly.DragonFlyNetwork
collector_class = network.dragonfly.DragonFlyNetworkCollector
class TestAIXNetwork(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = network.aix.AIXNetwork
collector_class = network.aix.AIXNetworkCollector
class TestNetBSDNetwork(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = network.netbsd.NetBSDNetwork
collector_class = network.netbsd.NetBSDNetworkCollector
class TestOpenBSDNetwork(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = network.openbsd.OpenBSDNetwork
collector_class = network.openbsd.OpenBSDNetworkCollector
class TestSunOSNetwork(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = network.sunos.SunOSNetwork
collector_class = network.sunos.SunOSNetworkCollector
class TestLinuxVirtual(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = virtual.linux.LinuxVirtual
collector_class = virtual.linux.LinuxVirtualCollector
class TestFreeBSDVirtual(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = virtual.freebsd.FreeBSDVirtual
collector_class = virtual.freebsd.FreeBSDVirtualCollector
class TestNetBSDVirtual(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = virtual.netbsd.NetBSDVirtual
collector_class = virtual.netbsd.NetBSDVirtualCollector
class TestOpenBSDVirtual(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = virtual.openbsd.OpenBSDVirtual
collector_class = virtual.openbsd.OpenBSDVirtualCollector
class TestHPUXVirtual(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = virtual.hpux.HPUXVirtual
collector_class = virtual.hpux.HPUXVirtualCollector
class TestSunOSVirtual(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = virtual.sunos.SunOSVirtual
collector_class = virtual.sunos.SunOSVirtualCollector
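

# Fixture data for the hardware fact tests: sample 'lsblk' output, a UUID
# mapping, and /etc/mtab contents (presumably consumed by mount/device fact
# tests defined later in this module).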
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = [
[
'sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
[
'devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'
],
[
'securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
[
'devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'
],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
[
'tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'
],
[
'pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'
],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
[
'systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'
],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
[
'hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'
],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
[
'/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'
],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# lets assume this is a bindmount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
[
'tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'
],
[
'gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'
],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
BIND_MOUNTS = ['/not/a/real/bind_mount']
with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f:
FINDMNT_OUTPUT = f.read()
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
# FIXME: mock.patch instead
def setUp(self):
        # The @timeout decorator tracebacks if GATHER_TIMEOUT is None (the default until get_all_facts sets it via a global)
facts.GATHER_TIMEOUT = 10
def tearDown(self):
facts.GATHER_TIMEOUT = None
    # The Hardware subclasses freak out if instantiated directly, so
    # mock platform.system and instantiate Hardware() so that we get a
    # LinuxHardware() we can test.
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
def test_get_mount_facts(self,
mock_lsblk_uuid,
mock_find_bind_mounts,
mock_mtab_entries):
module = Mock()
# Returns a LinuxHardware-ish
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
        # get_mount_facts() returns a dict of mount facts
mount_facts = lh.get_mount_facts()
self.assertIsInstance(mount_facts, dict)
self.assertIn('mounts', mount_facts)
self.assertIsInstance(mount_facts['mounts'], list)
self.assertIsInstance(mount_facts['mounts'][0], dict)
@patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
def test_get_mtab_entries(self, mock_get_file_content):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
mtab_entries = lh._mtab_entries()
self.assertIsInstance(mtab_entries, list)
self.assertIsInstance(mtab_entries[0], list)
self.assertEqual(len(mtab_entries), 38)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
def test_find_bind_mounts(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
# If bind_mounts becomes another seq type, feel free to change
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 1)
self.assertIn('/not/a/real/bind_mount', bind_mounts)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
def test_find_bind_mounts_no_findmnts(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
def test_lsblk_uuid(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop9', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
def test_lsblk_uuid_no_lsblk(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop0', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
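# Illustrative sketch, not part of the original test module: the
# space-in-name case exercised above works if each lsblk output line is
# split on its *last* whitespace run, since UUIDs never contain spaces.
# The helper name below is hypothetical and only documents that behaviour.
def _split_lsblk_line_sketch(line):
    # b'/dev/mapper/an-example-mapper with a space in the name 84639acb-...'
    # -> (b'/dev/mapper/an-example-mapper with a space in the name',
    #     b'84639acb-...')
    parts = line.rsplit(None, 1)
    if len(parts) == 2:
        return parts[0], parts[1]
    return parts[0], None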
| gpl-3.0 |
undp-aprc/undp-alm-old | modules/contrib/proj4js/lib/proj4js/build/build.py | 129 | 3416 | #!/usr/bin/env python
import sys
sys.path.append("../tools")
import mergejs
import optparse
def build(config_file = None, output_file = None, options = None):
have_compressor = []
try:
import jsmin
have_compressor.append("jsmin")
except ImportError:
print "No jsmin"
try:
import closure
have_compressor.append("closure")
except Exception, E:
print "No closure (%s)" % E
try:
import closure_ws
have_compressor.append("closure_ws")
except ImportError:
print "No closure_ws"
try:
import minimize
have_compressor.append("minimize")
except ImportError:
print "No minimize"
use_compressor = None
if options.compressor and options.compressor in have_compressor:
use_compressor = options.compressor
sourceDirectory = "../lib"
configFilename = "library.cfg"
filename = "proj4js-compressed.js"
outputFilename = "../lib/" + filename
if config_file:
configFilename = config_file
extension = configFilename[-4:]
if extension != ".cfg":
configFilename = config_file + ".cfg"
if output_file:
outputFilename = output_file
print "Merging libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
print "Setting the filename to "+filename
    merged = merged.replace('scriptName: "proj4js.js",', 'scriptName: "'+filename+'",')
print "Compressing using %s" % use_compressor
if use_compressor == "jsmin":
minimized = jsmin.jsmin(merged)
elif use_compressor == "minimize":
minimized = minimize.minimize(merged)
elif use_compressor == "closure_ws":
if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
print "\nPre-compressing using jsmin"
merged = jsmin.jsmin(merged)
print "\nIs being compressed using Closure Compiler Service."
try:
minimized = closure_ws.minimize(merged)
except Exception, E:
print "\nAbnormal termination."
sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
if len(minimized) <= 2:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation using Web service failed!")
else:
print '\nClosure Compilation using Web service has completed successfully.'
elif use_compressor == "closure":
minimized = closure.minimize(merged)
else: # fallback
minimized = merged
print "Adding license file."
minimized = file("license.txt").read() + minimized
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
print "Done."
if __name__ == '__main__':
    opt = optparse.OptionParser(usage="%s [options] [config_file] [output_file]\n Default config_file is 'library.cfg', default output_file is '../lib/proj4js-compressed.js'")
opt.add_option("-c", "--compressor", dest="compressor", help="compression method: one of 'jsmin', 'minimize', 'closure_ws', 'closure', or 'none'", default="jsmin")
(options, args) = opt.parse_args()
if not len(args):
build(options=options)
elif len(args) == 1:
build(args[0], options=options)
elif len(args) == 2:
build(args[0], args[1], options=options)
else:
print "Wrong number of arguments"
| gpl-2.0 |
jhaux/tensorflow | tensorflow/contrib/signal/python/kernel_tests/shape_ops_test.py | 23 | 2282 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for shape_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FramesTest(test.TestCase):
def test_mapping_of_indices_without_padding(self):
with self.test_session():
tensor = constant_op.constant(np.arange(9152), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frames(tensor, 512, 180)
result = result.eval()
expected = np.tile(np.arange(512), (49, 1))
expected += np.tile(np.arange(49) * 180, (512, 1)).T
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_mapping_of_indices_with_padding(self):
with self.test_session():
tensor = constant_op.constant(np.arange(10000), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frames(tensor, 512, 192)
result = result.eval()
expected = np.tile(np.arange(512), (51, 1))
expected += np.tile(np.arange(51) * 192, (512, 1)).T
expected[expected >= 10000] = 0
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
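# Illustrative helper, not part of the original test file: the expected frame
# counts above (49 frames without padding, 51 with) follow from the usual
# framing arithmetic for a length-n signal cut into windows of frame_length
# every frame_step samples; the padded variant rounds up instead of down.
def _expected_num_frames(n, frame_length, frame_step, pad):
  import math
  frames = (n - frame_length) / frame_step
  frames = math.ceil(frames) if pad else math.floor(frames)
  return int(frames) + 1
# _expected_num_frames(9152, 512, 180, pad=False) == 49
# _expected_num_frames(10000, 512, 192, pad=True) == 51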
if __name__ == "__main__":
test.main()
| apache-2.0 |
tongwang01/tensorflow | tensorflow/contrib/slim/python/slim/data/parallel_reader.py | 12 | 10476 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import summary
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
        reader_class(**reader_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue)
common_queue = tf.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
      num_readers: an integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
    A queue runner for enqueuing in the `common_queue` is automatically added to
the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(queue_runner.QueueRunner(
self._common_queue, enqueue_ops))
return self._common_queue.dequeue(name=name)
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None):
"""Reads multiple records in parallel from data_sources using n readers.
It uses a ParallelReader to read from multiple files in parallel using
multiple readers created using `reader_class` with `reader_kwargs'.
  If shuffle is True, the common_queue will be a RandomShuffleQueue; otherwise
  it will be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
    key, value = parallel_read(data_sources, tf.TFRecordReader, num_readers=4)
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
    num_readers: an integer, number of Readers to create.
reader_kwargs: an optional dict, of kwargs for the reader.
    shuffle: boolean, whether to shuffle the files and the records by using
      RandomShuffleQueue as common_queue.
dtypes: A list of types. The length of dtypes must equal the number
of elements in each record. If it is None it will default to
[tf.string, tf.string] for (key, value).
capacity: integer, capacity of the common_queue.
min_after_dequeue: integer, minimum number of records in the common_queue
after dequeue. Needed for a good shuffle.
seed: A seed for RandomShuffleQueue.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope('parallel_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=num_epochs, shuffle=shuffle)
dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
if shuffle:
common_queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
seed=seed)
else:
common_queue = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=dtypes)
summary.scalar('queue/%s/fraction_of_%d_full' %
(common_queue.name, capacity),
math_ops.to_float(common_queue.size()) * (1. / capacity))
return ParallelReader(reader_class,
common_queue,
num_readers=num_readers,
reader_kwargs=reader_kwargs).read(filename_queue)
def single_pass_read(data_sources,
reader_class,
reader_kwargs=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope('single_pass_read'):
filename_queue = tf_input.string_input_producer(data_files,
num_epochs=1,
shuffle=False,
capacity=1)
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
def get_data_files(data_sources):
"""Get data_files from data_sources.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
Returns:
a list of data_files.
Raises:
    ValueError: if no data files are found
"""
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
    raise ValueError('No data files found in %s' % data_sources)
return data_files
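# Illustrative usage sketch, not part of the original module; the file
# pattern below is a placeholder:
#
#   key, value = parallel_read(['/tmp/data/train-*.tfrecord'],
#                              reader_class=io_ops.TFRecordReader,
#                              num_readers=4, shuffle=True)
#
# The (key, value) tensors are then typically decoded and batched with
# tf.train.batch by higher-level slim dataset utilities.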
| apache-2.0 |
bintoro/schematics | tests/test_dict_type.py | 12 | 3771 | from schematics.models import Model
from schematics.types import IntType, StringType
from schematics.types.serializable import serializable
from schematics.types.compound import ModelType, DictType
try:
long
except NameError:
long = int
def test_basic_type():
class PlayerInfo(Model):
categories = DictType(StringType)
info = PlayerInfo(dict(categories={
"math": "math",
"batman": "batman",
}))
assert info.categories["math"] == "math"
d = info.serialize()
assert d == {
"categories": {
"math": "math",
"batman": "batman",
}
}
def test_dict_type_with_model_type():
class CategoryStats(Model):
category_slug = StringType()
total_wins = IntType()
class PlayerInfo(Model):
categories = DictType(ModelType(CategoryStats))
# TODO: Maybe it would be cleaner to have
# DictType(CategoryStats) and implicitly convert to ModelType(CategoryStats)
info = PlayerInfo(dict(categories={
"math": {
"category_slug": "math",
"total_wins": 1
},
"batman": {
"category_slug": "batman",
"total_wins": 3
}
}))
math_stats = CategoryStats({"category_slug": "math", "total_wins": 1})
assert info.categories["math"] == math_stats
d = info.serialize()
assert d == {
"categories": {
"math": {
"category_slug": "math",
"total_wins": 1
},
"batman": {
"category_slug": "batman",
"total_wins": 3
}
}
}
def test_dict_type_with_model_type_init_with_instance():
class ExperienceLevel(Model):
level = IntType()
class CategoryStats(Model):
category_slug = StringType()
total_wins = IntType()
@serializable(type=ModelType(ExperienceLevel))
def xp_level(self):
return ExperienceLevel(dict(level=self.total_wins))
class PlayerInfo(Model):
id = IntType()
categories = DictType(ModelType(CategoryStats))
# TODO: Maybe it would be cleaner to have
# DictType(CategoryStats) and implicitly convert to ModelType(CategoryStats)
math_stats = CategoryStats({
"category_slug": "math",
"total_wins": 1
})
info = PlayerInfo(dict(id=1, categories={
"math": math_stats,
}))
assert info.categories["math"] == math_stats
d = info.serialize()
assert d == {
"id": 1,
"categories": {
"math": {
"category_slug": "math",
"total_wins": 1,
"xp_level": {
"level": 1
}
},
}
}
def test_with_empty():
class CategoryStatsInfo(Model):
slug = StringType()
class PlayerInfo(Model):
categories = DictType(
ModelType(CategoryStatsInfo),
default=lambda: {},
serialize_when_none=True,
)
info = PlayerInfo()
assert info.categories == {}
d = info.serialize()
assert d == {
"categories": {},
}
def test_key_type():
def player_id(value):
return long(value)
class CategoryStatsInfo(Model):
slug = StringType()
class PlayerInfo(Model):
categories = DictType(ModelType(CategoryStatsInfo), coerce_key=player_id)
stats = CategoryStatsInfo({
"slug": "math",
})
info = PlayerInfo({
"categories": {
1: {"slug": "math"}
},
})
assert info.categories == {1: stats}
d = info.serialize()
assert d == {
"categories": {1: {"slug": "math"}}
}
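# Illustrative note, not part of the original tests: coerce_key runs on each
# incoming key during conversion, so a hypothetical str-keyed variant such as
#   DictType(ModelType(CategoryStatsInfo), coerce_key=str)
# would store the integer key 1 used above as the string '1' instead.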
| bsd-3-clause |
jakevdp/pelican-plugins | sub_parts/sub_parts.py | 59 | 2671 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pelican import signals
import logging
logger = logging.getLogger(__name__)
def patch_subparts(generator):
generator.subparts = []
slugs = {}
for article in generator.articles:
slugs[article.slug] = article
if '--' in article.slug:
generator.subparts.append(article)
for article in generator.subparts:
logger.info('sub_part: Detected %s', article.slug)
(pslug, _) = article.slug.rsplit('--', 1)
if pslug in slugs:
parent = slugs[pslug]
if not hasattr(parent, 'subparts'):
parent.subparts = []
parent.subparts.append(article)
article.subpart_of = parent
article.subtitle = article.title
article.title = article.title + ", " + parent.title
generator.dates.remove(article)
generator.articles.remove(article)
if article.category:
for cat, arts in generator.categories:
if cat.name == article.category.name:
arts.remove(article)
break
else:
logger.error(
'sub_part: Cannot remove sub-part from category %s',
article.category)
if (hasattr(article, 'subphotos') or
hasattr(article, 'photo_gallery')):
parent.subphotos = (
getattr(parent, 'subphotos',
len(getattr(parent, 'photo_gallery', []))) +
getattr(article, 'subphotos', 0) +
len(getattr(article, 'photo_gallery', [])))
else:
logger.error('sub_part: No parent for %s', pslug)
generator._update_context(('articles', 'dates', 'subparts'))
def write_subparts(generator, writer):
for article in generator.subparts:
signals.article_generator_write_article.send(generator,
content=article)
writer.write_file(
article.save_as, generator.get_template(article.template),
generator.context, article=article, category=article.category,
override_output=hasattr(article, 'override_save_as'),
relative_urls=generator.settings['RELATIVE_URLS'])
if len(generator.subparts) > 0:
print('sub_part: processed {} sub-parts.'.format(
len(generator.subparts)))
def register():
signals.article_generator_finalized.connect(patch_subparts)
signals.article_writer_finalized.connect(write_subparts)
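# Illustrative note, not part of the original plugin: the '--' convention
# means an article with slug 'my-series--part-2' becomes a sub-part of the
# article whose slug is 'my-series', because
#   >>> 'my-series--part-2'.rsplit('--', 1)
#   ['my-series', 'part-2']
# Sub-parts are removed from the index, date and category listings by
# patch_subparts() but still rendered to their own files by write_subparts().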
| agpl-3.0 |
otherness-space/myProject002 | my_project_002/lib/python2.7/site-packages/django/utils/translation/__init__.py | 110 | 4690 | """
Internationalization support.
"""
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
]
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ngettext_lazy = lazy(ngettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
ungettext_lazy = lazy(ungettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
npgettext_lazy = lazy(npgettext, six.text_type)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(object):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
self.old_language = get_language()
def __enter__(self):
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
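# Illustrative usage sketch, not part of the original module:
#
#   with override('fr'):
#       greeting = ugettext('Hello')  # looked up in the 'fr' catalog
#   # the previously active language is restored on exit
#
#   with override(None):
#       ...  # translation is deactivated inside the block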
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join([force_text(s) for s in strings])
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
return LANG_INFO[lang_code]
except KeyError:
raise KeyError("Unknown language code %r." % lang_code)
| mit |
biotrump/xbmc | tools/Fake Episode Maker/main.py | 169 | 2669 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import urllib
import os
import openAnything
from xml.dom import minidom
def parseShow(seriesID, show_name):
safe_show_name = show_name.replace(":", "")
details_url = "http://thetvdb.com/api/EB49E8B9E78EBEE1/series/"+seriesID+"/all/en.xml"
details = openAnything.fetch(details_url)
details_xml = minidom.parseString(details['data'])
seasons = details_xml.getElementsByTagName("SeasonNumber")
episodes = details_xml.getElementsByTagName("EpisodeNumber")
# check to see if parent show path needs to be made
if not os.access(safe_show_name, os.F_OK):
os.makedirs(safe_show_name)
i = 0
for item in episodes:
season = seasons[i].firstChild.data
episode = item.firstChild.data
filename = safe_show_name+" S"+season+"E"+episode+".avi"
        # see if the season path exists or not, and make it if not
        season_dir = safe_show_name + "\\Season " + season
        if not os.access(season_dir, os.F_OK):
            os.makedirs(season_dir)
        file = open(season_dir + "\\" + filename, "w")
        file.close()
print "Creating %s" % filename
i = i + 1
show_file = open("shows.txt")
shows = show_file.read().split("\n")
show_file.close()
for item in shows:
show_url = "http://thetvdb.com/api/GetSeries.php?"+urllib.urlencode({"seriesname":item})
print "Building "+item+"..."
show_xml = openAnything.fetch(show_url)
xmldoc = minidom.parseString(show_xml['data'])
node = xmldoc.getElementsByTagName("seriesid")
    if node:
seriesID = node[0].firstChild.data
parseShow(seriesID, item)
else:
        print "Could not find any data for "+item+" on TVDB.\nURL: "+show_url
| gpl-2.0 |
nguyentu1602/numpy | numpy/testing/tests/test_decorators.py | 66 | 4157 | from __future__ import division, absolute_import, print_function
from numpy.testing import dec, assert_, assert_raises, run_module_suite
from numpy.testing.noseclasses import KnownFailureTest
import nose
def test_slow():
@dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# first warnings is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
tuhangdi/django | tests/model_validation/tests.py | 292 | 2117 | from django.core import management
from django.core.checks import Error, run_checks
from django.db.models.signals import post_init
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils import six
class OnPostInit(object):
def __call__(self, **kwargs):
pass
def on_post_init(**kwargs):
pass
@override_settings(
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes'],
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
)
class ModelValidationTest(SimpleTestCase):
def test_models_validate(self):
# All our models should validate properly
# Validation Tests:
# * choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
# * related_name='+' doesn't clash with another '+'
# See: https://code.djangoproject.com/ticket/21375
management.call_command("check", stdout=six.StringIO())
def test_model_signal(self):
unresolved_references = post_init.unresolved_references.copy()
post_init.connect(on_post_init, sender='missing-app.Model')
post_init.connect(OnPostInit(), sender='missing-app.Model')
errors = run_checks()
expected = [
Error(
"The 'on_post_init' function was connected to the 'post_init' "
"signal with a lazy reference to the 'missing-app.Model' "
"sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
),
Error(
"An instance of the 'OnPostInit' class was connected to "
"the 'post_init' signal with a lazy reference to the "
"'missing-app.Model' sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
)
]
self.assertEqual(errors, expected)
post_init.unresolved_references = unresolved_references
| bsd-3-clause |
jphilipsen05/zulip | zproject/local_settings.py | 2 | 5562 | # This file is the Zulip local_settings.py configuration for the
# zulip.com installation of Zulip. It shouldn't be used in other
# environments, but you may find it to be a a helpful reference when
# setting up your own Zulip installation to see how Zulip can be
# configured.
#
# On a normal Zulip production server, zproject/local_settings.py is a
# symlink to /etc/zulip/settings.py (based off prod_settings_template.py).
import platform
import six.moves.configparser
from base64 import b64decode
from typing import Set
config_file = six.moves.configparser.RawConfigParser() # type: ignore # https://github.com/python/typeshed/pull/206
config_file.read("/etc/zulip/zulip.conf")
# Whether we're running in a production environment. Note that PRODUCTION does
# **not** mean hosted on Zulip.com; customer sites are PRODUCTION and VOYAGER
# and as such should not assume they are the main Zulip site.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
# The following flags are left over from the various configurations of
# Zulip run by Zulip, Inc. We will eventually be able to get rid of
# them and just have the PRODUCTION flag, but we need them for now.
ZULIP_COM_STAGING = PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-staging'
ZULIP_COM = ((PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-prod') or
ZULIP_COM_STAGING)
if not ZULIP_COM:
raise Exception("You should create your own local settings from prod_settings_template.")
ZULIP_FRIENDS_LIST_ID = '84b2f3da6b'
SHARE_THE_LOVE = True
SHOW_OSS_ANNOUNCEMENT = True
REGISTER_LINK_DISABLED = True
CUSTOM_LOGO_URL = "/static/images/logo/zulip-dropbox.png"
VERBOSE_SUPPORT_OFFERS = True
# This can be filled in automatically from the database, maybe
DEPLOYMENT_ROLE_NAME = 'zulip.com'
# XXX: replace me
CAMO_URI = 'https://external-content.zulipcdn.net/'
# Leave EMAIL_HOST unset or empty if you do not wish for emails to be sent
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Zulip <[email protected]>"
# The noreply address to be used as Reply-To for certain generated emails.
NOREPLY_EMAIL_ADDRESS = "Zulip <[email protected]>"
WELCOME_EMAIL_SENDER = {'email': '[email protected]', 'name': 'Waseem Daher'}
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
REMOTE_POSTGRES_HOST = "postgres.zulip.net"
STATSD_HOST = 'stats.zulip.net'
if ZULIP_COM_STAGING:
EXTERNAL_HOST = 'staging.zulip.com'
STATSD_PREFIX = 'staging'
STAGING_ERROR_NOTIFICATIONS = True
SAVE_FRONTEND_STACKTRACES = True
else:
EXTERNAL_HOST = 'zulip.com'
EXTERNAL_API_PATH = 'api.zulip.com'
STATSD_PREFIX = 'app'
# Terms of Service
TERMS_OF_SERVICE = 'corporate/terms.md'
# Major version number (the stuff before the first '.') has to be an integer.
# Users will be asked to re-sign the TOS only when the major version number increases.
# A TOS_VERSION of None has a major version number of -1.
# TOS_VERSION = '1.0'
# FIRST_TIME_TOS_TEMPLATE = 'zulipchat_migration_tos.html'
# Buckets used for Amazon S3 integration for storing files and user avatars.
S3_AUTH_UPLOADS_BUCKET = "zulip-user-uploads"
S3_AVATAR_BUCKET = "humbug-user-avatars"
APNS_SANDBOX = False
APNS_FEEDBACK = "feedback_production"
APNS_CERT_FILE = "/etc/ssl/django-private/apns-dist.pem"
DBX_APNS_CERT_FILE = "/etc/ssl/django-private/dbx-apns-dist.pem"
GOOGLE_OAUTH2_CLIENT_ID = '835904834568-ag4p18v0sd9a0tero14r3gekn6shoen3.apps.googleusercontent.com'
# The email address pattern to use for auto-generated stream emails
# The %s will be replaced with a unique token.
if ZULIP_COM_STAGING:
EMAIL_GATEWAY_PATTERN = "%[email protected]"
else:
EMAIL_GATEWAY_PATTERN = "%[email protected]"
EMAIL_GATEWAY_EXTRA_PATTERN_HACK = r'@[\w-]*\.zulip\.net'
# Email mirror configuration
# The email of the Zulip bot that the email gateway should post as.
EMAIL_GATEWAY_BOT = "[email protected]"
SSO_APPEND_DOMAIN = None # type: str
AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
'zproject.backends.GoogleMobileOauth2Backend')
# ALLOWED_HOSTS is used by django to determine which addresses
# Zulip can serve. This is a security measure.
# The following are the zulip.com hosts
ALLOWED_HOSTS = ['localhost', '.humbughq.com', '54.214.48.144', '54.213.44.54',
'54.213.41.54', '54.213.44.58', '54.213.44.73',
'54.200.19.65', '54.201.95.104', '54.201.95.206',
'54.201.186.29', '54.200.111.22',
'54.245.120.64', '54.213.44.83', '.zulip.com', '.zulip.net',
'54.244.50.66', '54.244.50.67', '54.244.50.68', '54.244.50.69', '54.244.50.70',
'54.244.50.64', '54.244.50.65', '54.244.50.74',
'chat.dropboxer.net']
NOTIFICATION_BOT = "[email protected]"
ERROR_BOT = "[email protected]"
NEW_USER_BOT = "[email protected]"
NAGIOS_SEND_BOT = '[email protected]'
NAGIOS_RECEIVE_BOT = '[email protected]'
# Our internal deployment has nagios checks for both staging and prod
NAGIOS_STAGING_SEND_BOT = '[email protected]'
NAGIOS_STAGING_RECEIVE_BOT = '[email protected]'
# Also used for support email in emails templates
ZULIP_ADMINISTRATOR = '[email protected]'
ADMINS = (
('Zulip Error Reports', '[email protected]'),
)
EXTRA_INSTALLED_APPS = [
'analytics',
'zilencer',
'corporate',
]
EVENT_LOGS_ENABLED = True
SYSTEM_ONLY_REALMS = set() # type: Set[str]
| apache-2.0 |
LaoZhongGu/kbengine | kbe/src/lib/python/Tools/scripts/mailerdaemon.py | 97 | 8039 | #!/usr/bin/env python3
"""Classes to parse mailer-daemon messages."""
import calendar
import email.message
import re
import os
import sys
class Unparseable(Exception):
pass
class ErrorMessage(email.message.Message):
def __init__(self):
email.message.Message.__init__(self)
self.sub = ''
def is_warning(self):
sub = self.get('Subject')
if not sub:
return 0
sub = sub.lower()
if sub.startswith('waiting mail'):
return 1
if 'warning' in sub:
return 1
self.sub = sub
return 0
def get_errors(self):
for p in EMPARSERS:
self.rewindbody()
try:
return p(self.fp, self.sub)
except Unparseable:
pass
raise Unparseable
# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address. The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's. The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses. The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
emparse_list_list = [
'error: (?P<reason>unresolvable): (?P<email>.+)',
('----- The following addresses had permanent fatal errors -----\n',
'(?P<email>[^ \n].*)\n( .*\n)?'),
'remote execution.*\n.*rmail (?P<email>.+)',
('The following recipients did not receive your message:\n\n',
' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
'------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
'^<(?P<email>.*)>:\n(?P<reason>.*)',
'^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
'^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
'^Original-Recipient: rfc822;(?P<email>.*)',
'^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
'^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
'^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
'^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
'^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
]
# compile the re's in the list and store them in-place.
for i in range(len(emparse_list_list)):
x = emparse_list_list[i]
if type(x) is type(''):
x = re.compile(x, re.MULTILINE)
else:
xl = []
for x in x:
xl.append(re.compile(x, re.MULTILINE))
x = tuple(xl)
del xl
emparse_list_list[i] = x
del x
del i
# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order. After the first match,
# no more expressions are searched for. So, order is important.
emparse_list_reason = [
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
'<>\.\.\. (?P<reason>.*)',
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
]
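# Illustrative note, not part of the original script: for the string entries
# above, emparse_list() splices the escaped bouncing address into the '<>'
# placeholder, e.g. for a hypothetical address '[email protected]':
#   exp = re.compile(re.escape('[email protected]').join('<>\.\.\. (?P<reason>.*)'.split('<>')),
#                    re.MULTILINE)
# which then matches bounce lines like '[email protected]... User unknown'.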
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
data = fp.read()
res = emparse_list_from.search(data)
if res is None:
from_index = len(data)
else:
from_index = res.start(0)
errors = []
emails = []
reason = None
for regexp in emparse_list_list:
if type(regexp) is type(()):
res = regexp[0].search(data, 0, from_index)
if res is not None:
try:
reason = res.group('reason')
except IndexError:
pass
while 1:
res = regexp[1].match(data, res.end(0), from_index)
if res is None:
break
emails.append(res.group('email'))
break
else:
res = regexp.search(data, 0, from_index)
if res is not None:
emails.append(res.group('email'))
try:
reason = res.group('reason')
except IndexError:
pass
break
if not emails:
raise Unparseable
if not reason:
reason = sub
if reason[:15] == 'returned mail: ':
reason = reason[15:]
for regexp in emparse_list_reason:
if type(regexp) is type(''):
for i in range(len(emails)-1,-1,-1):
email = emails[i]
exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
res = exp.search(data)
if res is not None:
errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
del emails[i]
continue
res = regexp.search(data)
if res is not None:
reason = res.group('reason')
break
for email in emails:
errors.append(' '.join((email.strip()+': '+reason).split()))
return errors
EMPARSERS = [emparse_list]
def sort_numeric(a, b):
a = int(a)
b = int(b)
if a < b:
return -1
elif a > b:
return 1
else:
return 0
def parsedir(dir, modify):
os.chdir(dir)
pat = re.compile('^[0-9]*$')
errordict = {}
errorfirst = {}
errorlast = {}
nok = nwarn = nbad = 0
# find all numeric file names and sort them
files = list(filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.')))
    files.sort(key=int)
for fn in files:
# Lets try to parse the file.
fp = open(fn)
m = email.message_from_file(fp, _class=ErrorMessage)
sender = m.getaddr('From')
print('%s\t%-40s\t'%(fn, sender[1]), end=' ')
if m.is_warning():
fp.close()
print('warning only')
nwarn = nwarn + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
continue
try:
errors = m.get_errors()
except Unparseable:
print('** Not parseable')
nbad = nbad + 1
fp.close()
continue
print(len(errors), 'errors')
# Remember them
for e in errors:
try:
mm, dd = m.getdate('date')[1:1+2]
date = '%s %02d' % (calendar.month_abbr[mm], dd)
except:
date = '??????'
if e not in errordict:
errordict[e] = 1
errorfirst[e] = '%s (%s)' % (fn, date)
else:
errordict[e] = errordict[e] + 1
errorlast[e] = '%s (%s)' % (fn, date)
fp.close()
nok = nok + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
print('--------------')
print(nok, 'files parsed,',nwarn,'files warning-only,', end=' ')
print(nbad,'files unparseable')
print('--------------')
list = []
for e in errordict.keys():
list.append((errordict[e], errorfirst[e], errorlast[e], e))
list.sort()
for num, first, last, e in list:
print('%d %s - %s\t%s' % (num, first, last, e))
def main():
modify = 0
if len(sys.argv) > 1 and sys.argv[1] == '-d':
modify = 1
del sys.argv[1]
if len(sys.argv) > 1:
for folder in sys.argv[1:]:
parsedir(folder, modify)
else:
parsedir('/ufs/jack/Mail/errorsinbox', modify)
if __name__ == '__main__' or sys.argv[0] == __name__:
main()
| lgpl-3.0 |
lidabing/xgyp | Python27/Tools/Scripts/checkappend.py | 100 | 4658 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 28 February 2000.
"""checkappend.py -- search for multi-argument .append() calls.
Usage: specify one or more file or directory paths:
checkappend [-v] file_or_dir [file_or_dir] ...
Each file_or_dir is checked for multi-argument .append() calls. When
a directory, all .py files in the directory, and recursively in its
subdirectories, are checked.
Use -v for status msgs. Use -vv for more status msgs.
In the absence of -v, the only output is pairs of the form
filename(linenumber):
line containing the suspicious append
Note that this finds multi-argument append calls regardless of whether
they're attached to list objects. If a module defines a class with an
append method that takes more than one argument, calls to that method
will be listed.
Note that this will not find multi-argument list.append calls made via a
bound method object. For example, this is not caught:
somelist = []
push = somelist.append
push(1, 2, 3)
"""
__version__ = 1, 0, 0
import os
import sys
import getopt
import tokenize
verbose = 0
def errprint(*args):
msg = ' '.join(args)
sys.stderr.write(msg)
sys.stderr.write("\n")
def main():
args = sys.argv[1:]
global verbose
try:
opts, args = getopt.getopt(sys.argv[1:], "v")
except getopt.error, msg:
errprint(str(msg) + "\n\n" + __doc__)
return
for opt, optarg in opts:
if opt == '-v':
verbose = verbose + 1
if not args:
errprint(__doc__)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((os.path.isdir(fullname) and
not os.path.islink(fullname))
or os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % (file,)
ok = AppendChecker(file, f).run()
if verbose and ok:
print "%r: Clean bill of health." % (file,)
[FIND_DOT,
FIND_APPEND,
FIND_LPAREN,
FIND_COMMA,
FIND_STMT] = range(5)
class AppendChecker:
def __init__(self, fname, file):
self.fname = fname
self.file = file
self.state = FIND_DOT
self.nerrors = 0
def run(self):
try:
tokenize.tokenize(self.file.readline, self.tokeneater)
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (self.fname, msg))
self.nerrors = self.nerrors + 1
return self.nerrors == 0
def tokeneater(self, type, token, start, end, line,
NEWLINE=tokenize.NEWLINE,
JUNK=(tokenize.COMMENT, tokenize.NL),
OP=tokenize.OP,
NAME=tokenize.NAME):
state = self.state
if type in JUNK:
pass
elif state is FIND_DOT:
if type is OP and token == ".":
state = FIND_APPEND
elif state is FIND_APPEND:
if type is NAME and token == "append":
self.line = line
self.lineno = start[0]
state = FIND_LPAREN
else:
state = FIND_DOT
elif state is FIND_LPAREN:
if type is OP and token == "(":
self.level = 1
state = FIND_COMMA
else:
state = FIND_DOT
elif state is FIND_COMMA:
if type is OP:
if token in ("(", "{", "["):
self.level = self.level + 1
elif token in (")", "}", "]"):
self.level = self.level - 1
if self.level == 0:
state = FIND_DOT
elif token == "," and self.level == 1:
self.nerrors = self.nerrors + 1
print "%s(%d):\n%s" % (self.fname, self.lineno,
self.line)
# don't gripe about this stmt again
state = FIND_STMT
elif state is FIND_STMT:
if type is NEWLINE:
state = FIND_DOT
else:
raise SystemError("unknown internal state '%r'" % (state,))
self.state = state
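# Illustrative walk-through, not part of the original script: a statement like
#     somelist.append(1, 2)
# drives the state machine FIND_DOT -> FIND_APPEND -> FIND_LPAREN ->
# FIND_COMMA, and the comma at nesting level 1 triggers the report; by
# contrast somelist.append((1, 2)) reaches the comma at level 2 and is not
# flagged.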
if __name__ == '__main__':
main()
| bsd-3-clause |
ubuntu/ubuntu-make | tests/small/test_ui.py | 15 | 6281 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the generic ui module"""
from concurrent import futures
from gi.repository import GLib
from time import time
from unittest.mock import Mock, patch
from ..tools import LoggedTestCase
import threading
from umake.tools import MainLoop, Singleton
from umake.ui import UI
class TestUI(LoggedTestCase):
"""This will test the UI generic module"""
def setUp(self):
super().setUp()
self.mockUIPlug = Mock()
self.mockUIPlug._display.side_effect = self.display_UIPlug
self.contentType = Mock()
self.ui = UI(self.mockUIPlug)
self.mainloop_object = MainLoop()
self.mainloop_thread = None
self.function_thread = None
self.display_thread = None
self.time_display_call = 0
def tearDown(self):
Singleton._instances = {}
super().tearDown()
# function that will complete once the mainloop is started
def wait_for_mainloop_function(self):
timeout_time = time() + 5
while not self.mainloop_object.mainloop.is_running():
if time() > timeout_time:
raise(BaseException("Mainloop not started in 5 seconds"))
def wait_for_mainloop_shutdown(self):
timeout_time = time() + 5
while self.mainloop_object.mainloop.is_running():
if time() > timeout_time:
raise(BaseException("Mainloop not stopped in 5 seconds"))
def get_mainloop_thread(self):
self.mainloop_thread = threading.current_thread().ident
def start_glib_mainloop(self):
# quit after 5 seconds if nothing else has ended the mainloop
GLib.timeout_add_seconds(5, self.mainloop_object.mainloop.quit)
GLib.idle_add(self.get_mainloop_thread)
self.mainloop_object.run()
def display_UIPlug(self, contentType):
"""handler to mock _display and save the current thread"""
self.time_display_call = time()
self.assertEqual(self.contentType, contentType)
self.display_thread = threading.current_thread().ident
self.mainloop_object.quit(raise_exception=False)
def test_singleton(self):
"""Ensure we are delivering a singleton for UI"""
other = UI(self.mockUIPlug)
self.assertEqual(self.ui, other)
def test_return_to_mainscreen(self):
"""We call the return to main screen on the UIPlug"""
UI.return_main_screen()
self.assertTrue(self.mockUIPlug._return_main_screen.called)
@patch("umake.tools.sys")
def test_call_display(self, mocksys):
"""We call the display method from the UIPlug"""
UI.display(self.contentType)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.display_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
@patch("umake.tools.sys")
def test_call_display_other_thread(self, mocksys):
"""We call the display method on UIPlug in the main thread from another thread"""
def run_display(future):
self.function_thread = threading.current_thread().ident
UI.display(self.contentType)
executor = futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(self.wait_for_mainloop_function)
future.add_done_callback(run_display)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.function_thread)
self.assertIsNotNone(self.display_thread)
self.assertNotEqual(self.mainloop_thread, self.function_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
@patch("umake.tools.sys")
def test_call_delayed_display(self, mocksys):
"""We call the display method from the UIPlug in delayed_display with 50ms waiting"""
UI.delayed_display(self.contentType)
now = time()
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.display_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
self.assertTrue(self.time_display_call - now > 0.05)
@patch("umake.tools.sys")
def test_call_delayed_display_from_other_thread(self, mocksys):
"""We call the display method from the UIPlug in delayed_display with 50ms waiting, even on other thread"""
now = 0
def run_display(future):
nonlocal now
self.function_thread = threading.current_thread().ident
now = time()
UI.delayed_display(self.contentType)
executor = futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(self.wait_for_mainloop_function)
future.add_done_callback(run_display)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.function_thread)
self.assertIsNotNone(self.display_thread)
self.assertNotEqual(self.mainloop_thread, self.function_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
self.assertTrue(self.time_display_call - now > 0.05)
| gpl-3.0 |
thorwhalen/ut | pdict/special.py | 1 | 5569 | """Special dicts"""
__author__ = 'thor'
from collections import defaultdict, UserDict
from ut.pdict.get import set_value_in_nested_key_path
val_unlikely_to_be_value_of_dict = (1987654321, 8239080923)
class keydefaultdict(defaultdict):
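"""defaultdict variant whose default_factory receives the missing key.
Illustrative doctest (assumes a one-argument factory):
>>> d = keydefaultdict(lambda key: key.upper())
>>> d['spam']
'SPAM'
>>> 'spam' in d # the computed value is now cached
True
"""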
def __missing__(self, key):
ret = self[key] = self.default_factory(key)
return ret
class DictDefaultDict(dict):
"""
Acts similarly to collections.defaultdict, except that
(1) the defaults depend on the key (given by a dict of key-->default_val at construction), and
(2) the defaults are stored values rather than factory functions, so be careful
with mutable defaults: the same object is handed out (by reference) on every miss.
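Example (illustrative):
>>> d = DictDefaultDict({'a': 1, 'b': 2})
>>> d['a'] # missing key: falls back to its per-key default
1
>>> d['a'] = 10 # explicitly set values take precedence over the defaults
>>> d['a']
10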
"""
def __init__(self, default_dict):
super(DictDefaultDict, self).__init__()
self.default_dict = default_dict
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
return self.default_dict[item]
class KeyPathDict(dict):
"""
NOTE: Might want to check out key_path.py (in https://github.com/i2mint/py2mint/) instead.
A dict where you can get and set values from key_paths (i.e. dot-separated strings or lists of nested keys).
Use with care.
Some functionality that would be expected from such a dict subclass isn't implemented yet, or only partially.
Further, operating with KeyPathDict is slower: one test showed that getting a value was 80 times slower.
But, to be fair, that was microseconds instead of nanoseconds, so this class can still be useful for
convenience when it is not in a bottleneck of a process.
>>> input_dict = {
... 'a': {
... 'b': 1,
... 'c': 'val of a.c',
... 'd': [1, 2]
... },
... 'b': {
... 'A': 'val of b.A',
... 'B': {
... 'AA': 'val of b.B.AA'
... }
... },
... 10: 'val for 10',
... '10': 10
... }
>>>
>>> d = KeyPathDict(input_dict)
>>> d
{'a': {'b': 1, 'c': 'val of a.c', 'd': [1, 2]}, 'b': {'A': 'val of b.A', 'B': {'AA': 'val of b.B.AA'}}, 10: 'val for 10', '10': 10}
>>> d.get('a.c')
'val of a.c'
>>> d.get(['a', 'c']) == d['a.c']
True
>>> d[['a', 'c']] == d['a.c']
True
>>> d.get('non.existent.key', 'default')
'default'
>>> d['b.B.AA']
'val of b.B.AA'
>>> d['b.B.AA'] = 3 # assigning another value to EXISTING key path
>>> d['b.B.AA']
3
>>> d['10'] = 0 # assigning another value to EXISTING key path
>>> d['10']
0
>>> d['new_key'] = 7 # assigning another value to new SINGLE key
>>> d['new_key']
7
>>> d['new.key.path'] = 8 # assigning a value to new key path
>>> d['new.key']
{'path': 8}
>>> d['new.key.old.path'] = 9 # assigning a value to new key path, intersecting with another
>>> d['new.key']
{'path': 8, 'old': {'path': 9}}
>>> d['new.key'] = 'something new' # assigning a value to a key (sub-)path that already exists
>>> d['new.key']
'something new'
"""
def get(self, key_path, d=None):
# return get_value_in_key_path(dict(KeyPathDict), key_path, d)
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
k_length = len(key_path)
if k_length == 0:
# An empty key path can't address anything, so return the default.
return d
else:
val_so_far = super(KeyPathDict, self).get(key_path[0], d)
for key in key_path[1:]:
if isinstance(val_so_far, dict):
val_so_far = val_so_far.get(key, val_unlikely_to_be_value_of_dict)
if val_so_far is val_unlikely_to_be_value_of_dict:
return d
else:
return d
return val_so_far
else:
return super(KeyPathDict, self).get(key_path, d)
def __getitem__(self, val):
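# Note: unlike a plain dict, a missing key (or key path) yields None here
# instead of raising KeyError, because get() is called with default None.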
return self.get(val, None)
def __setitem__(self, key_path, val):
"""
Set val at key_path, creating intermediate dicts for missing keys along the way.
:param key_path:
:param val:
:return:
"""
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
first_key = key_path[0]
if len(key_path) == 1:
super(KeyPathDict, self).__setitem__(first_key, val)
# self[first_key] = val
else:
if first_key in self:
set_value_in_nested_key_path(self[first_key], key_path[1:], val)
else:
self[first_key] = {}
set_value_in_nested_key_path(self[first_key], key_path[1:], val)
else:
super(KeyPathDict, self).__setitem__(key_path, val)
def __contains__(self, key_path):
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
if len(key_path) == 1:
return super(KeyPathDict, self).__contains__(key_path[0])
else:
tmp = super(KeyPathDict, self).__getitem__(key_path[0])
for k in key_path[1:]:
if not isinstance(tmp, dict) or k not in tmp:
return False
tmp = tmp[k]
return True
else:
return super(KeyPathDict, self).__contains__(key_path)
| mit |
MOA-2011/enigma2.pli4.0 | lib/python/Components/Sources/CurrentService.py | 72 | 1404 | from Components.PerServiceDisplay import PerServiceBase
from enigma import iPlayableService
from Source import Source
from Components.Element import cached
import NavigationInstance
class CurrentService(PerServiceBase, Source):
def __init__(self, navcore):
Source.__init__(self)
PerServiceBase.__init__(self, navcore,
{
iPlayableService.evStart: self.serviceEvent,
iPlayableService.evEnd: self.serviceEvent,
# FIXME: we should check 'interesting_events'
# which is not always provided.
iPlayableService.evUpdatedInfo: self.serviceEvent,
iPlayableService.evUpdatedEventInfo: self.serviceEvent,
iPlayableService.evNewProgramInfo: self.serviceEvent,
iPlayableService.evCuesheetChanged: self.serviceEvent,
iPlayableService.evVideoSizeChanged: self.serviceEvent,
iPlayableService.evHBBTVInfo: self.serviceEvent
}, with_event=True)
self.navcore = navcore
def serviceEvent(self, event):
self.changed((self.CHANGED_SPECIFIC, event))
@cached
def getCurrentService(self):
return self.navcore.getCurrentService()
service = property(getCurrentService)
@cached
def getCurrentServiceRef(self):
if NavigationInstance.instance is not None:
return NavigationInstance.instance.getCurrentlyPlayingServiceOrGroup()
return None
serviceref = property(getCurrentServiceRef)
def destroy(self):
PerServiceBase.destroy(self)
Source.destroy(self)
| gpl-2.0 |
Yelp/elastalert | tests/loaders_test.py | 1 | 18593 | # -*- coding: utf-8 -*-
import copy
import datetime
import os
import mock
import pytest
import elastalert.alerts
import elastalert.ruletypes
from elastalert.config import load_conf
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
test_config = {'rules_folder': 'test_folder',
'run_every': {'minutes': 10},
'buffer_time': {'minutes': 10},
'es_host': 'elasticsearch.test',
'es_port': 12345,
'writeback_index': 'test_index',
'writeback_alias': 'test_alias'}
test_rule = {'es_host': 'test_host',
'es_port': 12345,
'name': 'testrule',
'type': 'spike',
'spike_height': 2,
'spike_type': 'up',
'timeframe': {'minutes': 10},
'index': 'test_index',
'query_key': 'testkey',
'compare_key': 'comparekey',
'filter': [{'term': {'key': 'value'}}],
'alert': 'email',
'use_count_query': True,
'doc_type': 'blsh',
'email': '[email protected]',
'aggregation': {'hours': 2},
'include': ['comparekey', '@timestamp']}
test_args = mock.Mock()
test_args.config = 'test_config'
test_args.rule = None
test_args.debug = False
test_args.es_debug_trace = None
def test_import_rules():
rules_loader = FileRulesLoader(test_config)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['type'] = 'testing.test.RuleType'
with mock.patch.object(rules_loader, 'load_yaml') as mock_open:
mock_open.return_value = test_rule_copy
# Test that type is imported
with mock.patch('builtins.__import__') as mock_import:
mock_import.return_value = elastalert.ruletypes
rules_loader.load_configuration('test_config', test_config)
assert mock_import.call_args_list[0][0][0] == 'testing.test'
assert mock_import.call_args_list[0][0][3] == ['RuleType']
# Test that alerts are imported
test_rule_copy = copy.deepcopy(test_rule)
mock_open.return_value = test_rule_copy
test_rule_copy['alert'] = 'testing2.test2.Alerter'
with mock.patch('builtins.__import__') as mock_import:
mock_import.return_value = elastalert.alerts
rules_loader.load_configuration('test_config', test_config)
assert mock_import.call_args_list[0][0][0] == 'testing2.test2'
assert mock_import.call_args_list[0][0][3] == ['Alerter']
def test_import_import():
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = 'importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'email': 'ignored@email', # overwritten by the email in import_rule
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert mock_open.call_args_list[0][0] == ('blah.yaml',)
assert mock_open.call_args_list[1][0] == ('importme.ymlt',)
assert len(mock_open.call_args_list) == 2
assert rules['es_port'] == 12349
assert rules['es_host'] == 'imported_host'
assert rules['email'] == ['[email protected]']
assert rules['filter'] == import_rule['filter']
# check global import_rule dependency
assert rules_loader.import_rules == {'blah.yaml': ['importme.ymlt']}
def test_import_absolute_import():
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = '/importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'email': 'ignored@email', # overwritten by the email in import_rule
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert mock_open.call_args_list[0][0] == ('blah.yaml',)
assert mock_open.call_args_list[1][0] == ('/importme.ymlt',)
assert len(mock_open.call_args_list) == 2
assert rules['es_port'] == 12349
assert rules['es_host'] == 'imported_host'
assert rules['email'] == ['[email protected]']
assert rules['filter'] == import_rule['filter']
def test_import_filter():
# Check that if a filter is specified the rules are merged:
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = 'importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'filter': [{'term': {'ratchet': 'clank'}}],
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert rules['filter'] == [{'term': {'ratchet': 'clank'}}, {'term': {'key': 'value'}}]
def test_load_inline_alert_rule():
rules_loader = FileRulesLoader(test_config)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['alert'] = [
{
'email': {
'email': '[email protected]'
}
},
{
'email': {
'email': '[email protected]'
}
}
]
test_config_copy = copy.deepcopy(test_config)
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [test_config_copy, test_rule_copy]
rules_loader.load_modules(test_rule_copy)
assert isinstance(test_rule_copy['alert'][0], elastalert.alerts.EmailAlerter)
assert isinstance(test_rule_copy['alert'][1], elastalert.alerts.EmailAlerter)
assert '[email protected]' in test_rule_copy['alert'][0].rule['email']
assert '[email protected]' in test_rule_copy['alert'][1].rule['email']
def test_file_rules_loader_get_names_recursive():
conf = {'scan_subdirectories': True, 'rules_folder': 'root'}
rules_loader = FileRulesLoader(conf)
walk_paths = (('root', ('folder_a', 'folder_b'), ('rule.yaml',)),
('root/folder_a', (), ('a.yaml', 'ab.yaml')),
('root/folder_b', (), ('b.yaml',)))
with mock.patch('os.walk') as mock_walk:
mock_walk.return_value = walk_paths
paths = rules_loader.get_names(conf)
paths = [p.replace(os.path.sep, '/') for p in paths]
assert 'root/rule.yaml' in paths
assert 'root/folder_a/a.yaml' in paths
assert 'root/folder_a/ab.yaml' in paths
assert 'root/folder_b/b.yaml' in paths
assert len(paths) == 4
def test_file_rules_loader_get_names():
# Check for no subdirectory
conf = {'scan_subdirectories': False, 'rules_folder': 'root'}
rules_loader = FileRulesLoader(conf)
files = ['badfile', 'a.yaml', 'b.yaml']
with mock.patch('os.listdir') as mock_list:
with mock.patch('os.path.isfile') as mock_path:
mock_path.return_value = True
mock_list.return_value = files
paths = rules_loader.get_names(conf)
paths = [p.replace(os.path.sep, '/') for p in paths]
assert 'root/a.yaml' in paths
assert 'root/b.yaml' in paths
assert len(paths) == 2
def test_load_rules():
test_rule_copy = copy.deepcopy(test_rule)
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_ls:
mock_ls.return_value = [('', [], ['testrule.yaml'])]
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert isinstance(rules['rules'][0]['type'], elastalert.ruletypes.RuleType)
assert isinstance(rules['rules'][0]['alert'][0], elastalert.alerts.Alerter)
assert isinstance(rules['rules'][0]['timeframe'], datetime.timedelta)
assert isinstance(rules['run_every'], datetime.timedelta)
for included_key in ['comparekey', 'testkey', '@timestamp']:
assert included_key in rules['rules'][0]['include']
# Assert include doesn't contain duplicates
assert rules['rules'][0]['include'].count('@timestamp') == 1
assert rules['rules'][0]['include'].count('comparekey') == 1
def test_load_default_host_port():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_ls:
mock_ls.return_value = [('', [], ['testrule.yaml'])]
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
# Assert the default host and port from the config are used
assert rules['es_port'] == 12345
assert rules['es_host'] == 'elasticsearch.test'
def test_load_ssl_env_false():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_USE_SSL': 'false'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['use_ssl'] is False
def test_load_ssl_env_true():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_USE_SSL': 'true'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['use_ssl'] is True
def test_load_url_prefix_env():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_URL_PREFIX': 'es/'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['es_url_prefix'] == 'es/'
def test_load_disabled_rules():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['is_enabled'] = False
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
# The rule is not loaded because it has is_enabled=False
assert len(rules['rules']) == 0
def test_raises_on_missing_config():
optional_keys = ('aggregation', 'use_count_query', 'query_key', 'compare_key', 'filter', 'include', 'es_host', 'es_port', 'name')
test_rule_copy = copy.deepcopy(test_rule)
for key in list(test_rule_copy.keys()):
test_rule_copy = copy.deepcopy(test_rule)
test_config_copy = copy.deepcopy(test_config)
test_rule_copy.pop(key)
# Non required keys
if key in optional_keys:
continue
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_walk:
mock_walk.return_value = [('', [], ['testrule.yaml'])]
with pytest.raises(EAException, message='key %s should be required' % key):
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
def test_compound_query_key():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = ['field1', 'field2']
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'field1' in test_rule_copy['include']
assert 'field2' in test_rule_copy['include']
assert test_rule_copy['query_key'] == 'field1,field2'
assert test_rule_copy['compound_query_key'] == ['field1', 'field2']
def test_query_key_with_single_value():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = ['field1']
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'field1' in test_rule_copy['include']
assert test_rule_copy['query_key'] == 'field1'
assert 'compound_query_key' not in test_rule_copy
def test_query_key_with_no_values():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = []
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'query_key' not in test_rule_copy
assert 'compound_query_key' not in test_rule_copy
def test_name_inference():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('name')
rules_loader.load_options(test_rule_copy, test_config, 'msmerc woz ere.yaml')
assert test_rule_copy['name'] == 'msmerc woz ere'
def test_raises_on_bad_generate_kibana_filters():
test_rule['generate_kibana_link'] = True
bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}],
[{'terms': {'blah': 'blah'}}],
[{'query': {'not_querystring': 'this:that'}}],
[{'query': {'wildcard': 'this*that'}}],
[{'blah': 'blah'}]]
good_filters = [[{'term': {'field': 'value'}}],
[{'not': {'term': {'this': 'that'}}}],
[{'not': {'query': {'query_string': {'query': 'this:that'}}}}],
[{'query': {'query_string': {'query': 'this:that'}}}],
[{'range': {'blah': {'from': 'a', 'to': 'b'}}}],
[{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]]
# Test that all the good filters work, but fail with a bad filter added
for good in good_filters:
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['filter'] = good
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.return_value = test_rule_copy
rules_loader.load_configuration('blah', test_config)
for bad in bad_filters:
test_rule_copy['filter'] = good + bad
with pytest.raises(EAException):
rules_loader.load_configuration('blah', test_config)
def test_kibana_discover_from_timedelta():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['kibana_discover_from_timedelta'] = {'minutes': 2}
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert isinstance(test_rule_copy['kibana_discover_from_timedelta'], datetime.timedelta)
assert test_rule_copy['kibana_discover_from_timedelta'] == datetime.timedelta(minutes=2)
def test_kibana_discover_to_timedelta():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['kibana_discover_to_timedelta'] = {'minutes': 2}
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert isinstance(test_rule_copy['kibana_discover_to_timedelta'], datetime.timedelta)
assert test_rule_copy['kibana_discover_to_timedelta'] == datetime.timedelta(minutes=2)
| apache-2.0 |
nelmiux/CarnotKE | jyhton/lib-python/2.7/test/test_datetime.py | 72 | 134923 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
from __future__ import division
import sys
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
pickle_choices = [(pickler, unpickler, proto)
for pickler in pickle, cPickle
for unpickler in pickle, cPickle
for proto in range(3)]
assert len(pickle_choices) == 2*2*3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
import datetime
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
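# Illustrative usage (values are made up): FixedOffset(-300, "EST") models a
# zone five hours behind UTC; utcoffset()/tzname()/dst() ignore the datetime
# argument and simply return the fixed values supplied at construction.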
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.assertTrue(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.assertIsInstance(ne, tzinfo)
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.assertIsInstance(fo, tzinfo)
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway, else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.assertTrue(type(orig) is tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertTrue(type(derived) is tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
orig = PicklableFixedOffset(offset, 'cookie')
self.assertIsInstance(orig, tzinfo)
self.assertTrue(type(orig) is PicklableFixedOffset)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), 'cookie')
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIsInstance(derived, tzinfo)
self.assertTrue(type(derived) is PicklableFixedOffset)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertFalse(me == ())
self.assertTrue(me != ())
self.assertFalse(() == me)
self.assertTrue(() != me)
self.assertIn(me, [1, 20L, [], me])
self.assertIn([], [me, 1, 20L, []])
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
self.assertRaises(TypeError, cmp, (), me)
self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10L, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10L, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10L, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Mul/div by float isn't supported.
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a/x)
self.assertRaises(TypeError, lambda: x/a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
for zero in 0, 0L:
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(),
((24*3600*td.days + td.seconds)*10**6
+ td.microseconds)/10**6)
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertTrue(not timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assertTrue(type(t1) is T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assertTrue(type(t2) is T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assertTrue(type(t3) is timedelta)
t4 = T.from_td(t3)
self.assertTrue(type(t4) is T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
class SubclassDate(date):
sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in xrange(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
for delta in [tiny, timedelta(1), timedelta(2)]:
dt = self.theclass.min + delta
dt -= delta # no problem
self.assertRaises(OverflowError, dt.__sub__, delta)
self.assertRaises(OverflowError, dt.__add__, -delta)
dt = self.theclass.max - delta
dt += delta # no problem
self.assertRaises(OverflowError, dt.__add__, delta)
self.assertRaises(OverflowError, dt.__sub__, -delta)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons that could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
self.assertTrue(today == todayagain or
abs(todayagain - today) < timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
iso_long_years.sort()
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime(u"%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
#at least, exercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
self.assertRaises(TypeError, cmp, our, 1)
self.assertRaises(TypeError, cmp, 1, our)
class AnotherDateTimeClass(object):
def __cmp__(self, other):
# Return "equal" so calling this can't be confused with
# compare-by-address (which never says "equal" for distinct
# objects).
return 0
__hash__ = None # Silence Py3k warning
# This still errors, because date and datetime comparison raise
# TypeError instead of NotImplemented when they don't know what to
# do, in order to stop comparison from falling back to the default
# compare-by-address.
their = AnotherDateTimeClass()
self.assertRaises(TypeError, cmp, our, their)
# Oops: The next stab raises TypeError in the C implementation,
# but not in the Python implementation of datetime. The difference
        # arises because the Python implementation defines __cmp__ while
        # the C implementation defines tp_richcompare. This is more pain
        # to fix than it's worth, so the test is commented out.
# self.assertEqual(cmp(their, our), 0)
        # But date and datetime comparisons return NotImplemented instead if the
# other object has a timetuple attr. This gives the other object a
# chance to do the comparison.
class Comparable(AnotherDateTimeClass):
def timetuple(self):
return ()
their = Comparable()
self.assertEqual(cmp(our, their), 0)
self.assertEqual(cmp(their, our), 0)
self.assertTrue(our == their)
self.assertTrue(their == our)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1900.
cls = self.theclass
self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
for y in 1, 49, 51, 99, 100, 1000, 1899:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
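    def test_replace_leap_day(self):
        # Illustrative sketch: replace() validates the combined result, so
        # a Feb 29 base moves cleanly to another leap year, while (per the
        # assert above) a non-leap year raises ValueError.
        base = self.theclass(2000, 2, 29)
        self.assertEqual(base.replace(year=2004), self.theclass(2004, 2, 29))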
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
base = '1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in '9', chr(0), chr(13), '\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + chr(ord_byte) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
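    def test_float_microsecond_resolution(self):
        # Illustrative sketch of the precision claim above, assuming
        # IEEE-754 doubles: near year 9999 a POSIX timestamp is ~2.5e11
        # seconds, where one ulp is ~3e-5 s, so sub-microsecond steps
        # vanish entirely.
        big = 253402300799.0  # roughly 9999-12-31T23:59:59 UTC
        self.assertEqual(big, big + 1e-7)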
def test_strftime_with_bad_tzname_replace(self):
        # Verify that strftime raises TypeError (instead of crashing) when
        # tzinfo.tzname().replace() returns a non-string.
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
s = pickle.dumps(a)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
        # Test whether fromtimestamp "rounds up" floats that are within
        # one microsecond below an integer.
self.assertEqual(self.theclass.fromtimestamp(0.9999999),
self.theclass.fromtimestamp(1))
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertTrue(abs(from_timestamp - from_now) <= tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
result, frac = _strptime._strptime(string, format)
expected = self.theclass(*(result[0:6]+(frac,)))
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
def test_more_timetuple(self):
        # This tests fields beyond those tested by TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
        # This tests fields beyond those tested by TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
# Pretty boring! The TZ test is more interesting here. astimezone()
# simply can't be applied to a naive object.
dt = self.theclass.now()
f = FixedOffset(44, "")
self.assertRaises(TypeError, dt.astimezone) # not enough args
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
self.assertRaises(ValueError, dt.astimezone, f) # naive
self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_bool(self):
cls = self.theclass
self.assertTrue(cls(1))
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
self.assertTrue(not cls(0))
self.assertTrue(not cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
return timedelta(minutes = dt and 42 or -42)
dst = utcoffset
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.assertTrue(t.tzinfo is b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
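    def test_utc_offset_bound_arithmetic(self):
        # Illustrative sketch of the bound tested above: the largest legal
        # offset, 1439 minutes, is exactly one minute short of a full day.
        self.assertEqual(timedelta(minutes=1439),
                         timedelta(days=1) - timedelta(minutes=1))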
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.assertTrue(t.utcoffset() is None)
self.assertTrue(t.dst() is None)
self.assertTrue(t.tzname() is None)
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of minutes.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(seconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
expected = cmp(x.minute, y.minute)
self.assertEqual(got, expected)
        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
# though (and time.utcoffset() passes None to tzinfo.utcoffset()),
# so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.assertTrue(t.tzinfo is None)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertTrue(t4.tzinfo is None)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertTrue(t4.utcoffset() is None)
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertTrue(t4.tzname() is None)
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.assertTrue(t4.dst() is None)
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
def tzname(self, dt): return 42
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
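    def test_hash_edge_cases_same_instant(self):
        # Illustrative sketch: the first pair above denotes one instant
        # after the offset is subtracted (00:01 - 23:59 == 00:00 - 23:58),
        # so the times must compare equal as well as hash equal.
        t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
        t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
        self.assertEqual(t1, t2)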
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# Test cases with non-None tzinfo.
cls = self.theclass
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.assertTrue(not t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(not t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(t)
        # But this should raise ValueError -- the utcoffset is bogus.
t = cls(0, tzinfo=FixedOffset(24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
# Likewise.
t = cls(0, tzinfo=FixedOffset(-24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
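    def test_more_bool_utc_midnight(self):
        # Illustrative sketch: under the rules exercised above, an aware
        # time is false exactly when its UTC-adjusted value is midnight.
        t = self.theclass(1, 30, tzinfo=FixedOffset(90, ""))
        self.assertTrue(not t)  # 01:30 - 1:30 == 00:00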
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = time(1, 2, 3)
t2 = time(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
        # For time objects with identical tzinfo members, utcoffset() is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.assertTrue(t1 < t2)
self.assertTrue(t1 != t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 == t1)
self.assertTrue(t2 == t2)
        # Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
self.assertEqual(t1, t2)
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.assertTrue(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.assertTrue(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.assertTrue(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.assertTrue(t1 > t2)
# Make t2 naive and it should fail.
t2 = self.theclass.min
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
        # Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertTrue(nowaware.tzinfo is tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
        self.assertTrue(nowawareplus.tzinfo is tz55)
nowawareplus2 = delta + nowaware
self.assertTrue(nowawareplus2.tzinfo is tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertTrue(diff.tzinfo is tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertTrue(nowawareplus.tzinfo is tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
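    def test_interzone_subtract(self):
        # Illustrative sketch of the subtraction rule derived above, with
        # fixed offsets: a - b == (a - a.utcoffset()) - (b - b.utcoffset()).
        a = self.theclass(2002, 3, 19, 12, 0, tzinfo=FixedOffset(60, "p1"))
        b = self.theclass(2002, 3, 19, 12, 0, tzinfo=FixedOffset(-300, "m5"))
        self.assertEqual(a - b, b.utcoffset() - a.utcoffset())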
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
utc = FixedOffset(0, "utc", 0)
for dummy in range(3):
now = datetime.now(weirdtz)
self.assertTrue(now.tzinfo is weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
        # But on some flavor of Mac, it's nowhere near that. So we can't
        # know what time that actually is; we can only test that relative
        # changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
# Ensure tm_isdst is 0 regardless of what dst() says: DST is never
# in effect for a UTC time.
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour) # 20 min + 53 min = 1 h + 13 min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
self.assertEqual(0, t.tm_isdst)
# At the edges, UTC adjustment can normalize into years out-of-range
# for a datetime object. Ensure that a correct timetuple is
# created anyway.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
t = tiny.utctimetuple()
self.assertEqual(t.tm_year, MINYEAR-1)
self.assertEqual(t.tm_mon, 12)
self.assertEqual(t.tm_mday, 31)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 1)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 366) # "year 0" is a leap year
self.assertEqual(t.tm_isdst, 0)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
t = huge.utctimetuple()
self.assertEqual(t.tm_year, MAXYEAR+1)
self.assertEqual(t.tm_mon, 1)
self.assertEqual(t.tm_mday, 1)
self.assertEqual(t.tm_hour, 23)
self.assertEqual(t.tm_min, 58)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 1)
self.assertEqual(t.tm_isdst, 0)
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.assertTrue(dt.tzinfo is f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.assertTrue(x.tzinfo is f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.assertTrue(got.tzinfo is fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.assertTrue(got.tzinfo is expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
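# Worked example (illustration only, using the 2002 calendar that
# TestTimezoneConversions relies on below):
#   first_sunday_on_or_after(DSTSTART.replace(year=2002)) -> 2002-04-07 02:00
#   first_sunday_on_or_after(DSTEND.replace(year=2002))   -> 2002-10-27 01:00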
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
# Although dt was considered to be in daylight time,
# there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
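# Concrete illustration: on a real wall clock, 1:30 occurs twice when
# DST ends -- first as 1:30 daylight time, then again an hour later as
# 1:30 standard time. The checks below verify that conversion from UTC
# reproduces that repeated hour.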
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
# class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertTrue(not as_date == as_datetime)
self.assertTrue(not as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertTrue(as_date.__eq__(as_datetime))
different_day = (as_date.day + 1) % 20 + 1
self.assertTrue(not as_date.__eq__(as_datetime.replace(day=
different_day)))
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
makinacorpus/django | tests/custom_managers/models.py | 9 | 2042 | """
23. Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# An example of a custom manager called "objects".
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField()
objects = PersonManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
# An example of a custom manager that sets get_queryset().
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super(PublishedBookManager, self).get_queryset().filter(is_published=True)
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField()
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
def __str__(self):
return self.title
# An example of providing multiple custom managers.
class FastCarManager(models.Manager):
def get_queryset(self):
return super(FastCarManager, self).get_queryset().filter(top_speed__gt=150)
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
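# Usage sketch (hypothetical queries; assumes these models are installed):
#   Person.objects.get_fun_people()  # only people with fun=True
#   Book.published_objects.all()     # only published books
#   Car.fast_cars.all()              # only cars with top_speed > 150
#   Car.cars.all()                   # the plain manager still sees every car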
| bsd-3-clause |
phobson/wqio | wqio/tests/test_datacollections.py | 2 | 28761 | from distutils.version import LooseVersion
from textwrap import dedent
from io import StringIO
import numpy
import scipy
from scipy import stats
import pandas
from unittest import mock
import pytest
import pandas.testing as pdtest
from wqio.tests import helpers
from wqio.features import Location, Dataset
from wqio.datacollections import DataCollection, _dist_compare
OLD_SCIPY = LooseVersion(scipy.version.version) < LooseVersion("0.19")
def check_stat(expected_csv, result, comp=False):
index_col = [0]
if comp:
index_col += [1]
file_obj = StringIO(dedent(expected_csv))
expected = pandas.read_csv(file_obj, header=[0, 1], index_col=index_col)
if comp:
expected = expected.stack(level=-1)
pdtest.assert_frame_equal(
expected.sort_index(axis="columns"),
result.sort_index(axis="columns").round(6),
atol=1e-5,
)
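# Usage sketch for check_stat (both patterns appear in the tests below):
#   check_stat(known_csv, dc.median)                    # two-level station/result columns
#   check_stat(known_csv, dc.mann_whitney, comp=True)   # pairwise frames get stacked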
def remove_g_and_h(group):
return group.name[1] not in ["G", "H"]
@pytest.fixture
def dc():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="<",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
@pytest.fixture
def dc_noNDs():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="junk",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
def test_basic_attr(dc):
assert dc._raw_rescol == "res"
assert isinstance(dc.data, pandas.DataFrame)
assert dc.roscol == "ros_res"
assert dc.rescol == "ros_res"
assert dc.qualcol == "qual"
assert dc.stationcol == "loc"
assert dc.paramcol == "param"
assert dc.ndval == ["<"]
assert dc.bsiter == 10000
assert dc.groupcols == ["loc", "param"]
assert dc.tidy_columns == ["loc", "param", "res", "__censorship"]
assert hasattr(dc, "filterfxn")
def test_data(dc):
assert isinstance(dc.data, pandas.DataFrame)
assert dc.data.shape == (519, 8)
assert "G" in dc.data["param"].unique()
assert "H" in dc.data["param"].unique()
@pytest.mark.parametrize("useros", [True, False])
def test_tidy(dc, useros):
assert isinstance(dc.tidy, pandas.DataFrame)
assert dc.tidy.shape == (388, 5)
assert "G" not in dc.tidy["param"].unique()
assert "H" not in dc.tidy["param"].unique()
collist = ["loc", "param", "res", "__censorship", "ros_res"]
assert dc.tidy.columns.tolist() == collist
def test_paired(dc):
assert isinstance(dc.paired, pandas.DataFrame)
assert dc.paired.shape == (164, 6)
assert "G" not in dc.paired.index.get_level_values("param").unique()
assert "H" not in dc.paired.index.get_level_values("param").unique()
dc.paired.columns.tolist() == [
("res", "Inflow"),
("res", "Outflow"),
("res", "Reference"),
("__censorship", "Inflow"),
("__censorship", "Outflow"),
("__censorship", "Reference"),
]
def test_count(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Count,Count,Count
param,,,
A,21,22,20
B,24,22,19
C,24,24,25
D,24,25,21
E,19,16,20
F,21,24,17
"""
check_stat(known_csv, dc.count)
def test_n_unique(dc):
known_csv = """\
loc,Inflow,Outflow,Reference
result,bmp,bmp,bmp
param,,,
A,7,7,7
B,7,7,7
C,7,7,7
D,7,7,7
E,7,7,7
F,7,7,7
G,7,7,7
H,7,7,7
"""
check_stat(known_csv, dc.n_unique("bmp"))
@helpers.seed
def test_median(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,median,upper,lower,median,upper,lower,median,upper
param,,,,,,,,,
A,0.334506,1.197251,2.013994,0.860493,2.231058,2.626023,1.073386,1.639472,1.717293
B,1.366948,2.773989,3.297147,0.23201,1.546499,2.579206,0.204164,1.565076,2.196367
C,0.17351,0.525957,0.68024,0.247769,0.396984,0.540742,0.136462,0.412693,0.559458
D,0.374122,1.201892,2.098846,0.516989,1.362759,1.827087,0.314655,0.882695,1.24545
E,0.276095,1.070858,1.152887,0.287914,0.516746,1.456859,0.366824,0.80716,2.040739
F,0.05667,0.832488,1.310575,0.425237,1.510942,2.193997,0.162327,0.745993,1.992513
"""
check_stat(known_csv, dc.median)
@helpers.seed
def test_mean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,mean,upper,lower,mean,upper,lower,mean,upper
param,,,,,,,,,
A,1.231607,2.646682,4.204054,1.930601,5.249281,9.081952,1.540167,3.777974,6.389439
B,2.99031,7.647175,12.810844,1.545539,6.863835,12.705913,1.010374,4.504255,9.592572
C,0.37496,0.513248,0.65948,0.411501,1.004637,1.706317,0.35779,0.541962,0.734751
D,1.29141,3.021235,4.987855,1.285899,2.318808,3.451824,1.008364,1.945828,2.924812
E,0.818641,1.914696,3.049554,0.584826,1.098241,1.640807,1.113589,2.283292,3.581946
F,0.8379,9.825404,25.289933,1.497825,3.450184,5.61929,0.939917,2.491708,4.094258
"""
check_stat(known_csv, dc.mean)
@helpers.seed
def test_std_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,std. dev.,std. dev.,std. dev.
param,,,
A,3.58649,8.719371,5.527633
B,12.360099,13.60243,10.759285
C,0.353755,1.691208,0.493325
D,4.811938,2.849393,2.248178
E,2.55038,1.096698,2.789238
F,34.447565,5.361033,3.398367
"""
check_stat(known_csv, dc.std_dev)
@helpers.seed
def test_percentile_25(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 25,pctl 25,pctl 25
param,,,
A,0.522601,0.906029,1.094721
B,1.472541,0.251126,0.314226
C,0.164015,0.267521,0.136462
D,0.35688,0.516989,0.383895
E,0.364748,0.311508,0.394658
F,0.120068,0.406132,0.224429
"""
check_stat(known_csv, dc.percentile(25))
@helpers.seed
def test_percentile_75(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 75,pctl 75,pctl 75
param,,,
A,2.563541,3.838021,2.650648
B,4.728871,2.849948,2.261847
C,0.776388,0.853535,0.792612
D,3.04268,2.79341,3.611793
E,1.532775,1.59183,3.201534
F,1.792985,2.80979,2.742249
"""
check_stat(known_csv, dc.percentile(75))
@helpers.seed
def test_logmean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,0.140559,-0.55112,0.644202,0.733004,0.047053,1.22099,0.545205,-0.057683,1.029948
B,1.026473,0.368659,1.541241,0.105106,-0.939789,0.860244,0.068638,-0.932357,0.661203
C,-0.963004,-1.304115,-0.638446,-0.83221,-1.464092,-0.414379,-1.088377,-1.556795,-0.720706
D,0.062317,-0.663241,0.58349,0.185757,-0.325074,0.598432,-0.063507,-0.670456,0.434214
E,-0.103655,-0.751075,0.385909,-0.456202,-1.08692,0.029967,-0.068135,-0.787007,0.51226
F,-0.442721,-1.874677,0.344704,0.211658,-0.504166,0.734283,-0.253352,-1.175917,0.467231
"""
check_stat(known_csv, dc.logmean)
@helpers.seed
def test_logstd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,1.374026,1.343662,1.225352
B,1.430381,2.07646,1.662001
C,0.818504,1.263631,1.057177
D,1.530871,1.187246,1.277927
E,1.264403,1.121038,1.474431
F,2.324063,1.516331,1.701596
"""
check_stat(known_csv, dc.logstd_dev)
@helpers.seed
def test_geomean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
Geo-mean,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,1.150917,0.576304,1.904467,2.081323,1.048178,3.390543,1.724962,0.943949,2.800919
B,2.791205,1.445795,4.670381,1.110829,0.39071,2.363737,1.071049,0.393625,1.937121
C,0.381744,0.271413,0.528113,0.435087,0.231288,0.66075,0.336763,0.210811,0.486409
D,1.064299,0.515179,1.792283,1.204129,0.722474,1.819264,0.938467,0.511475,1.543749
E,0.901536,0.471859,1.470951,0.633686,0.337254,1.03042,0.934134,0.455205,1.66906
F,0.642286,0.153405,1.411572,1.235726,0.604009,2.083988,0.776195,0.308536,1.595571
"""
check_stat(known_csv, dc.geomean)
@helpers.seed
def test_geostd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
Geo-std. dev.,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,3.951225,3.833055,3.405365
B,4.180294,7.976181,5.269843
C,2.267105,3.538244,2.878234
D,4.622199,3.278041,3.589191
E,3.540977,3.068036,4.368548
F,10.217099,4.55548,5.48269
"""
check_stat(known_csv, dc.geostd_dev)
@helpers.seed
def test_shapiro(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,pvalue,statistic,pvalue,statistic,pvalue,statistic
param,,,,,,
A,1.8e-05,0.685783,1e-06,0.576069,4e-06,0.61735
B,1e-06,0.594411,0.0,0.530962,0.0,0.41471
C,0.028774,0.905906,0.0,0.546626,0.00279,0.860373
D,1e-06,0.622915,1.5e-05,0.722374,0.000202,0.76518
E,1.7e-05,0.654137,0.004896,0.818813,0.000165,0.74917
F,0.0,0.292916,2e-06,0.634671,0.000167,0.713968
"""
check_stat(known_csv, dc.shapiro)
@helpers.seed
def test_shapiro_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,statistic,pvalue,statistic,pvalue,statistic,pvalue
param,,,,,,
A,0.983521938,0.96662426,0.979861856,0.913820148,0.939460814,0.234214202
B,0.957531095,0.390856266,0.97048676,0.722278714,0.967978418,0.735424638
C,0.906479359,0.029602444,0.974698305,0.78197974,0.967106879,0.572929323
D,0.989704251,0.995502174,0.990663111,0.997093379,0.964812279,0.617747009
E,0.955088913,0.479993254,0.95211035,0.523841977,0.963425279,0.61430341
F,0.97542423,0.847370088,0.982230783,0.933124721,0.966197193,0.749036908
"""
check_stat(known_csv, dc.shapiro_log)
@helpers.seed
def test_lilliefors(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,lilliefors,pvalue,lilliefors,pvalue,lilliefors,pvalue
param,,,,,,
A,0.308131,1.4e-05,0.340594,0.0,0.364453,0.0
B,0.36764,0.0,0.420343,0.0,0.417165,0.0
C,0.166799,0.082737,0.324733,0.0,0.161753,0.090455
D,0.273012,6.7e-05,0.240311,0.000665,0.296919,3.7e-05
E,0.341398,3e-06,0.239314,0.014862,0.233773,0.005474
F,0.419545,0.0,0.331315,0.0,0.284249,0.000741
"""
check_stat(known_csv, dc.lilliefors)
@helpers.seed
def test_lilliefors_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,log-lilliefors,pvalue,log-lilliefors,pvalue,log-lilliefors,pvalue
param,,,,,,
A,0.08548109,0.95458004,0.15443943,0.19715747,0.20141389,0.03268737
B,0.16162839,0.10505016,0.12447902,0.49697902,0.15934334,0.22969362
C,0.16957278,0.07248915,0.12388174,0.44379732,0.11746642,0.48915671
D,0.06885549,0.99,0.06067356,0.99,0.13401954,0.41967483
E,0.13506577,0.47186822,0.14552341,0.47797919,0.09164876,0.92860794
F,0.14420794,0.30694533,0.08463267,0.92741885,0.08586933,0.9800294
"""
check_stat(known_csv, dc.lilliefors_log)
@helpers.seed
def test_anderson_darling(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling
@helpers.seed
def test_anderson_darling_log(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling_log
@helpers.seed
def test_mann_whitney(dc):
known_csv = """\
,,mann_whitney,mann_whitney,mann_whitney,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,180.0,179.0,,0.2198330905,0.4263216587
A,Outflow,282.0,,248.0,0.2198330905,,0.488580368
A,Reference,241.0,192.0,,0.4263216587,0.488580368,
B,Inflow,,345.0,317.0,,0.0766949991,0.0304383994
B,Outflow,183.0,,216.0,0.0766949991,,0.8650586835
B,Reference,139.0,202.0,,0.0304383994,0.8650586835,
C,Inflow,,282.0,323.0,,0.9097070273,0.6527104406
C,Outflow,294.0,,323.0,0.9097070273,,0.6527104406
C,Reference,277.0,277.0,,0.6527104406,0.6527104406,
D,Inflow,,285.0,263.0,,0.7718162376,0.8111960975
D,Outflow,315.0,,293.0,0.7718162376,,0.5082395211
D,Reference,241.0,232.0,,0.8111960975,0.5082395211,
E,Inflow,,164.0,188.0,,0.7033493939,0.9663820218
E,Outflow,140.0,,132.0,0.7033493939,,0.3813114322
E,Reference,192.0,188.0,,0.9663820218,0.3813114322,
F,Inflow,,201.0,172.0,,0.2505911218,0.8601783903
F,Outflow,303.0,,236.0,0.2505911218,,0.4045186043
F,Reference,185.0,172.0,,0.8601783903,0.4045186043
"""
check_stat(known_csv, dc.mann_whitney, comp=True)
@helpers.seed
def test_t_test(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,t_test,t_test,t_test
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2178424157,0.4563196599,,-1.2604458127,-0.7539785777
A,Outflow,0.2178424157,,0.5240147979,1.2604458127,,0.643450194
A,Reference,0.4563196599,0.5240147979,,0.7539785777,-0.643450194,
B,Inflow,,0.8430007638,0.3898358794,,0.1992705833,0.869235357
B,Outflow,0.8430007638,,0.5491097882,-0.1992705833,,0.6043850808
B,Reference,0.3898358794,0.5491097882,,-0.869235357,-0.6043850808,
C,Inflow,,0.1847386316,0.8191392537,,-1.3639360123,-0.2300373632
C,Outflow,0.1847386316,,0.2179907667,1.3639360123,,1.2615982727
C,Reference,0.8191392537,0.2179907667,,0.2300373632,-1.2615982727,
D,Inflow,,0.5484265023,0.344783812,,0.6056706932,0.9582600001
D,Outflow,0.5484265023,,0.6299742693,-0.6056706932,,0.4851636024
D,Reference,0.344783812,0.6299742693,,-0.9582600001,-0.4851636024,
E,Inflow,,0.2304569921,0.6770414622,,1.2287029977,-0.4198288251
E,Outflow,0.2304569921,,0.1023435465,-1.2287029977,,-1.6935358498
E,Reference,0.6770414622,0.1023435465,,0.4198288251,1.6935358498,
F,Inflow,,0.422008391,0.3549979666,,0.8190789273,0.9463539528
F,Outflow,0.422008391,,0.4988994144,-0.8190789273,,0.6826435968
F,Reference,0.3549979666,0.4988994144,,-0.9463539528,-0.6826435968
"""
check_stat(known_csv, dc.t_test, comp=True)
@helpers.seed
def test_levene(dc):
known_csv = """\
,,levene,levene,levene,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,1.176282059,0.293152155,,0.284450688,0.591287419
A,Outflow,1.176282059,,0.397705309,0.284450688,,0.531863542
A,Reference,0.293152155,0.397705309,,0.591287419,0.531863542,
B,Inflow,,0.003559637,0.402002411,,0.952694449,0.529578712
B,Outflow,0.003559637,,0.408938588,0.952694449,,0.526247443
B,Reference,0.402002411,0.408938588,,0.529578712,0.526247443,
C,Inflow,,1.965613561,0.679535532,,0.167626459,0.413910674
C,Outflow,1.965613561,,1.462364363,0.167626459,,0.232602352
C,Reference,0.679535532,1.462364363,,0.413910674,0.232602352,
D,Inflow,,0.643364813,0.983777911,,0.426532092,0.32681669
D,Outflow,0.643364813,,0.116830634,0.426532092,,0.734124856
D,Reference,0.983777911,0.116830634,,0.32681669,0.734124856,
E,Inflow,,0.961616536,0.410491665,,0.333914902,0.525668596
E,Outflow,0.961616536,,2.726351564,0.333914902,,0.107912818
E,Reference,0.410491665,2.726351564,,0.525668596,0.107912818,
F,Inflow,,0.841984453,0.734809611,,0.363948105,0.396999375
F,Outflow,0.841984453,,0.25881357,0.363948105,,0.613802541
F,Reference,0.734809611,0.25881357,,0.396999375,0.613802541,
"""
check_stat(known_csv, dc.levene, comp=True)
@helpers.seed
def test_wilcoxon(dc):
known_csv = """\
,,wilcoxon,wilcoxon,wilcoxon,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,32.0,59.0,,0.03479,0.430679
A,Outflow,32.0,,46.0,0.03479,,0.274445
A,Reference,59.0,46.0,,0.430679,0.274445,
B,Inflow,,38.0,22.0,,0.600179,0.182338
B,Outflow,38.0,,31.0,0.600179,,0.858863
B,Reference,22.0,31.0,,0.182338,0.858863,
C,Inflow,,75.0,120.0,,0.167807,0.601046
C,Outflow,75.0,,113.0,0.167807,,0.463381
C,Reference,120.0,113.0,,0.601046,0.463381,
D,Inflow,,44.0,31.0,,0.593618,0.530285
D,Outflow,44.0,,45.0,0.593618,,0.972125
D,Reference,31.0,45.0,,0.530285,0.972125,
E,Inflow,,21.0,19.0,,0.910156,0.386271
E,Outflow,21.0,,16.0,0.910156,,0.077148
E,Reference,19.0,16.0,,0.386271,0.077148,
F,Inflow,,62.0,22.0,,0.492459,0.952765
F,Outflow,62.0,,28.0,0.492459,,0.656642
F,Reference,22.0,28.0,,0.952765,0.656642,
"""
with pytest.warns(UserWarning):
check_stat(known_csv, dc.wilcoxon, comp=True)
@helpers.seed
def test_ranksums(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,rank_sums,rank_sums,rank_sums
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2153009,0.4187782,,-1.2391203,-0.8085428
A,Outflow,0.2153009,,0.4807102,1.2391203,,0.7051607
A,Reference,0.4187782,0.4807102,,0.8085428,-0.7051607,
B,Inflow,,0.0748817,0.029513,,1.781188,2.1765661
B,Outflow,0.0748817,,0.8547898,-1.781188,,0.1830104
B,Reference,0.029513,0.8547898,,-2.1765661,-0.1830104,
C,Inflow,,0.9015386,0.6455162,,-0.1237179,0.46
C,Outflow,0.9015386,,0.6455162,0.1237179,,0.46
C,Reference,0.6455162,0.6455162,,-0.46,-0.46,
D,Inflow,,0.7641772,0.8023873,,-0.3,0.2502587
D,Outflow,0.7641772,,0.5011969,0.3,,0.6726078
D,Reference,0.8023873,0.5011969,,-0.2502587,-0.6726078,
E,Inflow,,0.6911022,0.9551863,,0.3973597,-0.0561951
E,Outflow,0.6911022,,0.3727144,-0.3973597,,-0.8914004
E,Reference,0.9551863,0.3727144,,0.0561951,0.8914004,
F,Inflow,,0.2459307,0.8486619,,-1.1602902,-0.190826
F,Outflow,0.2459307,,0.3971011,1.1602902,,0.8468098
F,Reference,0.8486619,0.3971011,,0.190826,-0.8468098,
"""
check_stat(known_csv, dc.ranksums, comp=True)
@helpers.seed
@pytest.mark.xfail(OLD_SCIPY, reason="Scipy < 0.19")
def test_kendall(dc):
known_csv = """\
,,kendalltau,kendalltau,kendalltau,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,-0.051661,-0.00738,,0.772893,0.967114
A,Outflow,-0.051661,,-0.083333,0.772893,,0.690095
A,Reference,-0.00738,-0.083333,,0.967114,0.690095,
B,Inflow,,0.441351,0.298246,,0.015267,0.119265
B,Outflow,0.441351,,0.559855,0.015267,,0.004202
B,Reference,0.298246,0.559855,,0.119265,0.004202,
C,Inflow,,0.280223,0.084006,,0.078682,0.578003
C,Outflow,0.280223,,-0.1417,0.078682,,0.352394
C,Reference,0.084006,-0.1417,,0.578003,0.352394,
D,Inflow,,0.403469,0.095299,,0.020143,0.634826
D,Outflow,0.403469,,0.318337,0.020143,,0.094723
D,Reference,0.095299,0.318337,,0.634826,0.094723,
E,Inflow,,0.114286,0.640703,,0.673337,0.004476
E,Outflow,0.114286,,0.167944,0.673337,,0.449603
E,Reference,0.640703,0.167944,,0.004476,0.449603,
F,Inflow,,0.0,0.07231,,1.0,0.763851
F,Outflow,0.0,,0.388889,1.0,,0.063
F,Reference,0.07231,0.388889,,0.763851,0.063,
"""
check_stat(known_csv, dc.kendall, comp=True)
@helpers.seed
def test_spearman(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,spearmanrho,spearmanrho,spearmanrho
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.7574884491,0.9627447553,,-0.0809319588,0.012262418
A,Outflow,0.7574884491,,0.7617330788,-0.0809319588,,-0.0823529412
A,Reference,0.9627447553,0.7617330788,,0.012262418,-0.0823529412,
B,Inflow,,0.0110829791,0.0775159774,,0.5831305575,0.4537313433
B,Outflow,0.0110829791,,0.0024069317,0.5831305575,,0.6850916941
B,Reference,0.0775159774,0.0024069317,,0.4537313433,0.6850916941,
C,Inflow,,0.1330504059,0.6063501968,,0.3387640122,0.1134228342
C,Outflow,0.1330504059,,0.3431640379,0.3387640122,,-0.2070506455
C,Reference,0.6063501968,0.3431640379,,0.1134228342,-0.2070506455,
D,Inflow,,0.0195715066,0.4751861062,,0.4935814032,0.1858231711
D,Outflow,0.0195715066,,0.1263974782,0.4935814032,,0.363209462
D,Reference,0.4751861062,0.1263974782,,0.1858231711,0.363209462,
E,Inflow,,0.9828818202,0.0013596162,,0.0084033613,0.8112988341
E,Outflow,0.9828818202,,0.3413722947,0.0084033613,,0.3012263814
E,Reference,0.0013596162,0.3413722947,,0.8112988341,0.3012263814,
F,Inflow,,0.9645303744,0.6759971848,,-0.0106277141,0.1348767061
F,Outflow,0.9645303744,,0.0560590794,-0.0106277141,,0.5028571429
F,Reference,0.6759971848,0.0560590794,,0.1348767061,0.5028571429
"""
check_stat(known_csv, dc.spearman, comp=True)
@helpers.seed
def test_theilslopes(dc):
with helpers.raises(NotImplementedError):
_ = dc.theilslopes
def test_inventory(dc):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,3
Inflow,B,24,6
Inflow,C,24,0
Inflow,D,24,11
Inflow,E,19,4
Inflow,F,21,8
Outflow,A,22,1
Outflow,B,22,9
Outflow,C,24,4
Outflow,D,25,12
Outflow,E,16,2
Outflow,F,24,8
Reference,A,20,2
Reference,B,19,6
Reference,C,25,4
Reference,D,21,12
Reference,E,20,3
Reference,F,17,7
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(expected, dc.inventory.astype(int), check_names=False)
def test_inventory_noNDs(dc_noNDs):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,0
Inflow,B,24,0
Inflow,C,24,0
Inflow,D,24,0
Inflow,E,19,0
Inflow,F,21,0
Outflow,A,22,0
Outflow,B,22,0
Outflow,C,24,0
Outflow,D,25,0
Outflow,E,16,0
Outflow,F,24,0
Reference,A,20,0
Reference,B,19,0
Reference,C,25,0
Reference,D,21,0
Reference,E,20,0
Reference,F,17,0
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(
expected, dc_noNDs.inventory.astype(int), check_names=False,
)
@helpers.seed
def test_stat_summary(dc):
known_csv = StringIO(
dedent(
"""\
ros_res,loc,A,B,C,D,E,F
Count,Inflow,21,24,24,24,19,21
Count,Outflow,22,22,24,25,16,24
Count,Reference,20,19,25,21,20,17
Non-Detect,Inflow,3.0,6.0,0.0,11.0,4.0,8.0
Non-Detect,Outflow,1.0,9.0,4.0,12.0,2.0,8.0
Non-Detect,Reference,2.0,6.0,4.0,12.0,3.0,7.0
mean,Inflow,2.64668,7.64717,0.51325,3.02124,1.9147,9.8254
mean,Outflow,5.24928,6.86384,1.00464,2.31881,1.09824,3.45018
mean,Reference,3.77797,4.50425,0.54196,1.94583,2.28329,2.49171
std,Inflow,3.67506,12.62594,0.36136,4.91543,2.62027,35.29825
std,Outflow,8.92456,13.92253,1.72758,2.90815,1.13267,5.47634
std,Reference,5.67123,11.05411,0.5035,2.3037,2.8617,3.50296
min,Inflow,0.0756,0.17404,0.10213,0.05365,0.08312,0.00803
min,Outflow,0.11177,0.02106,0.03578,0.11678,0.07425,0.06377
min,Reference,0.15575,0.04909,0.04046,0.08437,0.05237,0.03445
10%,Inflow,0.1772,0.45233,0.13467,0.15495,0.1763,0.03548
10%,Outflow,0.44852,0.08297,0.08222,0.26949,0.19903,0.18008
10%,Reference,0.38448,0.13467,0.08241,0.19355,0.12777,0.09457
25%,Inflow,0.5226,1.47254,0.16401,0.35688,0.36475,0.12007
25%,Outflow,0.90603,0.25113,0.26752,0.51699,0.31151,0.40613
25%,Reference,1.09472,0.31423,0.13646,0.3839,0.39466,0.22443
50%,Inflow,1.19725,2.77399,0.52596,1.20189,1.07086,0.83249
50%,Outflow,2.23106,1.5465,0.39698,1.36276,0.51675,1.51094
50%,Reference,1.63947,1.56508,0.41269,0.8827,0.80716,0.74599
75%,Inflow,2.56354,4.72887,0.77639,3.04268,1.53278,1.79299
75%,Outflow,3.83802,2.84995,0.85354,2.79341,1.59183,2.80979
75%,Reference,2.65065,2.26185,0.79261,3.61179,3.20153,2.74225
90%,Inflow,6.02835,24.40655,0.99293,8.00691,6.28345,8.51706
90%,Outflow,12.43052,23.90022,2.43829,5.66731,2.30348,10.32829
90%,Reference,12.58278,6.67125,1.2205,4.78255,7.72012,8.57303
max,Inflow,13.87664,45.97893,1.26657,21.75505,8.88365,163.01001
max,Outflow,36.58941,47.49381,8.04948,12.39894,4.19118,23.29367
max,Reference,21.22363,48.23615,1.94442,7.67751,8.75609,10.5095
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).T
pdtest.assert_frame_equal(
expected.round(5),
dc.stat_summary().round(5),
check_names=False,
check_dtype=False,
rtol=1e-4,
)
def test_locations(dc):
for loc in dc.locations:
assert isinstance(loc, Location)
assert len(dc.locations) == 18
assert dc.locations[0].definition == {"loc": "Inflow", "param": "A"}
assert dc.locations[1].definition == {"loc": "Inflow", "param": "B"}
def test_datasets(dc):
_ds = []
for d in dc.datasets("Inflow", "Outflow"):
assert isinstance(d, Dataset)
_ds.append(d)
assert len(_ds) == 6
assert _ds[0].definition == {"param": "A"}
assert _ds[1].definition == {"param": "B"}
# this sufficiently tests dc._filter_collection
def test_selectLocations(dc):
locs = dc.selectLocations(param="A", loc=["Inflow", "Outflow"])
assert len(locs) == 2
for n, (loc, loctype) in enumerate(zip(locs, ["Inflow", "Outflow"])):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == loctype
def test_selectLocations_squeeze_False(dc):
locs = dc.selectLocations(param="A", loc=["Inflow"], squeeze=False)
assert len(locs) == 1
for n, loc in enumerate(locs):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True(dc):
loc = dc.selectLocations(param="A", loc=["Inflow"], squeeze=True)
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True_None(dc):
loc = dc.selectLocations(param="A", loc=["Junk"], squeeze=True)
assert loc is None
# since the test_selectLocations* tests stress _filter_collection
# enough, we'll mock it out for datasets:
def test_selectDatasets(dc):
with mock.patch.object(dc, "_filter_collection") as _fc:
with mock.patch.object(dc, "datasets", return_value=["A", "B"]) as _ds:
dc.selectDatasets("Inflow", "Reference", foo="A", bar="C")
_ds.assert_called_once_with("Inflow", "Reference")
_fc.assert_called_once_with(["A", "B"], foo="A", bar="C", squeeze=False)
@pytest.mark.parametrize("func", [stats.mannwhitneyu, stats.wilcoxon])
@pytest.mark.parametrize(
("x", "all_same"), [([5, 5, 5, 5, 5], True), ([5, 6, 7, 7, 8], False)]
)
def test_dist_compare_wrapper(x, all_same, func):
y = [5, 5, 5, 5, 5]
with mock.patch.object(stats, func.__name__) as _test:
result = _dist_compare(x, y, _test)
if all_same:
assert numpy.isnan(result.stat)
assert numpy.isnan(result.pvalue)
assert _test.call_count == 0
else:
# assert result == (0, 0)
_test.assert_called_once_with(x, y, alternative="two-sided")
| bsd-3-clause |
radicalbit/ambari | ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py | 1 | 3638 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions import stack_select
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from storm import storm
from service import service
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from setup_ranger_storm import setup_ranger_storm
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.resources.service import Service
class Nimbus(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
storm("nimbus")
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class NimbusDefault(Nimbus):
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select_packages(params.version)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
setup_ranger_storm(upgrade_type=upgrade_type)
service("nimbus", action="start")
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service("nimbus", action="stop")
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.pid_nimbus)
def get_log_folder(self):
import params
return params.log_dir
def get_user(self):
import params
return params.storm_user
def get_pid_files(self):
import status_params
return [status_params.pid_nimbus]
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class NimbusWindows(Nimbus):
def start(self, env):
import status_params
env.set_params(status_params)
Service(status_params.nimbus_win_service_name, action="start")
def stop(self, env):
import status_params
env.set_params(status_params)
Service(status_params.nimbus_win_service_name, action="stop")
def status(self, env):
import status_params
from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
env.set_params(status_params)
check_windows_service_status(status_params.nimbus_win_service_name)
if __name__ == "__main__":
Nimbus().execute()
| apache-2.0 |
emilk/sproxel | distro/common/lib/lib-tk/test/test_tkinter/test_text.py | 7 | 1172 | import unittest
import Tkinter
from test.test_support import requires, run_unittest
from ttk import setup_master
requires('gui')
class TextTest(unittest.TestCase):
def setUp(self):
self.root = setup_master()
self.text = Tkinter.Text(self.root)
def tearDown(self):
self.text.destroy()
def test_search(self):
text = self.text
# pattern and index are obligatory arguments.
self.assertRaises(Tkinter.TclError, text.search, None, '1.0')
self.assertRaises(Tkinter.TclError, text.search, 'a', None)
self.assertRaises(Tkinter.TclError, text.search, None, None)
# Invalid text index.
self.assertRaises(Tkinter.TclError, text.search, '', 0)
# Check if we are getting the indices as strings -- you are likely
# to get Tcl_Obj under Tk 8.5 if Tkinter doesn't convert it.
text.insert('1.0', 'hi-test')
self.assertEqual(text.search('-test', '1.0', 'end'), '1.2')
self.assertEqual(text.search('test', '1.0', 'end'), '1.3')
tests_gui = (TextTest, )
if __name__ == "__main__":
run_unittest(*tests_gui)
| bsd-3-clause |
cedi4155476/QGIS | python/plugins/fTools/tools/doSpatialJoin.py | 9 | 12355 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import QObject, SIGNAL, QVariant, QFile
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
from qgis.core import QGis, QgsVectorFileWriter, QgsVectorLayer, QgsMapLayerRegistry, QgsFields, QgsField, QgsFeature, QgsGeometry, NULL
import ftools_utils
from ui_frmSpatialJoin import Ui_Dialog
def myself(L):
# median computation
nVal = len(L)
if nVal == 1:
return L[0]
L.sort()
# handle even- and odd-length lists separately
medianVal = 0
if nVal > 1:
if (nVal % 2) == 0:
# list indices begin at 0, so subtract 1 from the 1-based
# position used in the standard median formula
medianVal = 0.5 * ((L[(nVal) / 2 - 1]) + (L[(nVal) / 2]))
else:
medianVal = L[(nVal + 1) / 2 - 1]
return medianVal
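# Illustration (not part of the original tool; note myself() sorts its
# argument in place when it holds more than one value):
#   myself([1.0, 3.0, 2.0, 4.0]) -> 2.5  (even count: mean of the middle pair)
#   myself([1.0, 3.0, 2.0])      -> 2.0  (odd count: the middle element)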
def filter_null(vals):
"""Takes an iterator of values and returns a new iterator returning the same values but skipping any NULL values"""
return (v for v in vals if v is not None)
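# Illustration (assumes NULL values arrive as None):
#   list(filter_null([1.0, None, 3.0])) -> [1.0, 3.0]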
class Dialog(QDialog, Ui_Dialog):
def __init__(self, iface):
QDialog.__init__(self, iface.mainWindow())
self.iface = iface
# Set up the user interface from Designer.
self.setupUi(self)
QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
self.setWindowTitle(self.tr("Join attributes by location"))
self.buttonOk = self.buttonBox_2.button(QDialogButtonBox.Ok)
# populate layer list
self.progressBar.setValue(0)
layers = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon])
self.inShape.addItems(layers)
self.joinShape.addItems(layers)
def accept(self):
self.buttonOk.setEnabled(False)
if self.inShape.currentText() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify target vector layer"))
elif self.outShape.text() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify output shapefile"))
elif self.joinShape.currentText() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify join vector layer"))
elif self.rdoSummary.isChecked() and not (self.chkSum.isChecked() or self.chkMean.isChecked() or self.chkMin.isChecked() or self.chkMax.isChecked() or self.chkMedian.isChecked()):
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify at least one summary statistic"))
else:
inName = self.inShape.currentText()
joinName = self.joinShape.currentText()
outPath = self.outShape.text()
if self.rdoSummary.isChecked():
summary = True
sumList = []
if self.chkSum.isChecked():
sumList.append("SUM")
if self.chkMean.isChecked():
sumList.append("MEAN")
if self.chkMin.isChecked():
sumList.append("MIN")
if self.chkMax.isChecked():
sumList.append("MAX")
if self.chkMedian.isChecked():
sumList.append("MED")
else:
summary = False
sumList = ["all"]
if self.rdoKeep.isChecked():
keep = True
else:
keep = False
outName = ftools_utils.getShapefileName(outPath)
res = self.compute(inName, joinName, outPath, summary, sumList, keep, self.progressBar)
self.outShape.clear()
if res:
addToTOC = QMessageBox.question(
self, self.tr("Spatial Join"),
self.tr("Created output shapefile:\n%s\n\nWould you like to add the new layer to the TOC?") % (unicode(outPath)),
QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton)
if addToTOC == QMessageBox.Yes:
self.vlayer = QgsVectorLayer(outPath, unicode(outName), "ogr")
QgsMapLayerRegistry.instance().addMapLayers([self.vlayer])
self.progressBar.setValue(0)
self.buttonOk.setEnabled(True)
def outFile(self):
self.outShape.clear()
(self.shapefileName, self.encoding) = ftools_utils.saveDialog(self)
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText(self.shapefileName)
def compute(self, inName, joinName, outName, summary, sumList, keep, progressBar):
layer1 = ftools_utils.getVectorLayerByName(inName)
provider1 = layer1.dataProvider()
fieldList1 = ftools_utils.getFieldList(layer1)
layer2 = ftools_utils.getVectorLayerByName(joinName)
provider2 = layer2.dataProvider()
fieldList2 = ftools_utils.getFieldList(layer2)
fieldList = QgsFields()
if provider1.crs() != provider2.crs():
QMessageBox.warning(self, self.tr("CRS warning!"), self.tr("Warning: Input layers have non-matching CRS.\nThis may cause unexpected results."))
if not summary:
fieldList2 = ftools_utils.testForUniqueness(fieldList1, fieldList2)
seq = range(0, len(fieldList1) + len(fieldList2))
fieldList1.extend(fieldList2)
fieldList1 = dict(zip(seq, fieldList1))
else:
numFields = {}
for j in xrange(len(fieldList2)):
if fieldList2[j].type() == QVariant.Int or fieldList2[j].type() == QVariant.Double:
numFields[j] = []
for i in sumList:
field = QgsField(i + unicode(fieldList2[j].name()), QVariant.Double, "real", 24, 16, self.tr("Summary field"))
fieldList.append(field)
field = QgsField("COUNT", QVariant.Double, "real", 24, 16, self.tr("Summary field"))
fieldList.append(field)
fieldList2 = ftools_utils.testForUniqueness(fieldList1, fieldList)
fieldList1.extend(fieldList)
seq = range(0, len(fieldList1))
fieldList1 = dict(zip(seq, fieldList1))
sRs = provider1.crs()
progressBar.setValue(13)
check = QFile(self.shapefileName)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
QMessageBox.warning(
self, self.tr('Error deleting shapefile'),
self.tr("Can't delete existing shapefile\n%s") % (self.shapefileName))
return False
fields = QgsFields()
for f in fieldList1.values():
fields.append(f)
writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fields, provider1.geometryType(), sRs)
#writer = QgsVectorFileWriter(outName, "UTF-8", fieldList1, provider1.geometryType(), sRs)
inFeat = QgsFeature()
outFeat = QgsFeature()
inFeatB = QgsFeature()
inGeom = QgsGeometry()
progressBar.setValue(15)
start = 15.00
add = 85.00 / provider1.featureCount()
index = ftools_utils.createIndex(provider2)
# cache all features from provider2 to avoid huge number of feature requests in the inner loop
mapP2 = {}
for f in provider2.getFeatures():
mapP2[f.id()] = QgsFeature(f)
fit1 = provider1.getFeatures()
while fit1.nextFeature(inFeat):
inGeom = inFeat.geometry()
atMap1 = inFeat.attributes()
outFeat.setGeometry(inGeom)
none = True
joinList = []
if inGeom.type() == QGis.Point:
#(check, joinList) = layer2.featuresInRectangle(inGeom.buffer(10,2).boundingBox(), True, True)
#layer2.select(inGeom.buffer(10,2).boundingBox(), False)
#joinList = layer2.selectedFeatures()
joinList = index.intersects(inGeom.buffer(10, 2).boundingBox())
if len(joinList) > 0:
check = 0
else:
check = 1
else:
#(check, joinList) = layer2.featuresInRectangle(inGeom.boundingBox(), True, True)
#layer2.select(inGeom.boundingBox(), False)
#joinList = layer2.selectedFeatures()
joinList = index.intersects(inGeom.boundingBox())
if len(joinList) > 0:
check = 0
else:
check = 1
if check == 0:
count = 0
for i in joinList:
inFeatB = mapP2[i] # cached feature from provider2
if inGeom.intersects(inFeatB.geometry()):
count = count + 1
none = False
atMap2 = inFeatB.attributes()
if not summary:
atMap = atMap1
atMap.extend(atMap2)
atMap = dict(zip(seq, atMap))
break
else:
for j in numFields.keys():
numFields[j].append(atMap2[j])
if summary and not none:
atMap = atMap1
for j in numFields.keys():
for k in sumList:
if k == "SUM":
atMap.append(sum(filter_null(numFields[j])))
elif k == "MEAN":
try:
nn_count = sum(1 for _ in filter_null(numFields[j]))
atMap.append(sum(filter_null(numFields[j])) / nn_count)
except ZeroDivisionError:
atMap.append(NULL)
elif k == "MIN":
try:
atMap.append(min(filter_null(numFields[j])))
except ValueError:
atMap.append(NULL)
elif k == "MED":
atMap.append(myself(numFields[j]))
else:
try:
atMap.append(max(filter_null(numFields[j])))
except ValueError:
atMap.append(NULL)
numFields[j] = []
atMap.append(count)
atMap = dict(zip(seq, atMap))
if none:
outFeat.setAttributes(atMap1)
else:
outFeat.setAttributes(atMap.values())
if keep: # keep all records
writer.addFeature(outFeat)
else: # keep only matching records
if not none:
writer.addFeature(outFeat)
start = start + add
progressBar.setValue(start)
del writer
return True
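# Editor's note: filter_null() and myself() are used in compute() above but
# are defined earlier in the upstream fTools module, outside this excerpt.
# A rough sketch of their assumed behaviour -- drop NULL attribute values
# and take a median -- purely for reference:
#
#   def filter_null(values):
#       return (v for v in values if v != NULL)
#
#   def myself(values):  # median of the non-NULL values
#       vals = sorted(filter_null(values))
#       if not vals:
#           return NULL
#       mid = len(vals) // 2
#       return vals[mid] if len(vals) % 2 else (vals[mid - 1] + vals[mid]) / 2.0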
| gpl-2.0 |
z-plot/z-plot | examples/basics-svg/verticalbars.py | 1 | 2343 | #! /usr/bin/env python
from zplot import *
# populate zplot table from data file
t = table('verticalbars.data')
# create the postscript file we'll use as our canvas
canvas = svg('verticalbars.svg')
# on the x-axis, we want categories, not numbers. Thus, we
# determine the number of categories by checking the max
# "rownumber" (a field automatically added by zplot). We want a
# half bar width (0.5) to the left and right of the bar locations
# so we don't overflow the drawable.
d = drawable(canvas, xrange=[-0.5,t.getmax('rownumber')+0.5], yrange=[0,80])
# xmanual is a list of the form [(label1,x1), (label2,x2), ...].
# We want to use the "op" field from the data file as our labels
# and use "rownumber" as our x coordinate.
axis(d, xtitle='Operation', xmanual=t.query(select='op,rownumber'),
ytitle='Latency (ms)', yauto=[0,80,20])
# we are going to create several bars with similar arguments. One
# easy way to do this is to put all the arguments in a dict, and
# use Python's special syntax ("**") for using the dict as named
# args. Then we can tweak the args between each call to
# verticalbars.
#
# yfield determines the bar height, and stackfields determines
# where the bottom of a bar starts. This is useful for showing
# several bar sections to indicate a breakdown. After the first
# bar, we append the previous yfield to stackfields to stack the bars.
p = plotter()
L = legend()
barargs = {'drawable':d, 'table':t, 'xfield':'rownumber',
'linewidth':0.5, 'fill':True, 'barwidth':0.8,
'legend':L, 'stackfields':[]}
# compute bar
barargs['yfield'] = 'compute'
barargs['legendtext'] = 'CPU'
barargs['fillcolor'] = 'red'
p.verticalbars(**barargs)
# network bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'network'
barargs['legendtext'] = 'Net'
barargs['fillcolor'] = 'green'
p.verticalbars(**barargs)
# storage bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'storage'
barargs['legendtext'] = 'Disk'
barargs['fillcolor'] = 'blue'
p.verticalbars(**barargs)
# we want legend entries to be all on one line. Thus, we use
# skipnext=1 to get one row. We specify the horizontal space
# between legend symbols (not considering text) with skipspace.
L.draw(canvas, coord=[d.left()+30, d.top()-5], skipnext=1, skipspace=40)
canvas.render()
| bsd-3-clause |
jocave/snapcraft | snapcraft/_options.py | 3 | 4856 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import multiprocessing
import os
import platform
logger = logging.getLogger(__name__)
_ARCH_TRANSLATIONS = {
'armv7l': {
'kernel': 'arm',
'deb': 'armhf',
'cross-compiler-prefix': 'arm-linux-gnueabihf-',
'cross-build-packages': ['gcc-arm-linux-gnueabihf'],
'triplet': 'arm-linux-gnueabihf',
},
'aarch64': {
'kernel': 'arm64',
'deb': 'arm64',
'cross-compiler-prefix': 'aarch64-linux-gnu-',
'cross-build-packages': ['gcc-aarch64-linux-gnu'],
'triplet': 'aarch64-linux-gnu',
},
'i686': {
'kernel': 'x86',
'deb': 'i386',
'triplet': 'i386-linux-gnu',
},
'ppc64le': {
'kernel': 'powerpc',
'deb': 'ppc64el',
'cross-compiler-prefix': 'powerpc64le-linux-gnu-',
'cross-build-packages': ['gcc-powerpc64le-linux-gnu'],
'triplet': 'powerpc64le-linux-gnu',
},
'x86_64': {
'kernel': 'x86',
'deb': 'amd64',
'triplet': 'x86_64-linux-gnu',
},
's390x': {
'kernel': 's390x',
'deb': 's390x',
'cross-compiler-prefix': 's390x-linux-gnu-',
'cross-build-packages': ['gcc-s390x-linux-gnu'],
'triplet': 's390x-linux-gnu',
}
}
class ProjectOptions:
@property
def use_geoip(self):
return self.__use_geoip
@property
def parallel_builds(self):
return self.__parallel_builds
@property
def parallel_build_count(self):
build_count = 1
if self.__parallel_builds:
try:
build_count = multiprocessing.cpu_count()
except NotImplementedError:
logger.warning(
'Unable to determine CPU count; disabling parallel builds')
return build_count
@property
def is_cross_compiling(self):
return self.__target_machine != self.__host_machine
@property
def cross_compiler_prefix(self):
try:
return self.__machine_info['cross-compiler-prefix']
except KeyError:
raise EnvironmentError(
                'Cross compilation not supported for target arch '
                '{!r}'.format(self.__target_machine))
@property
def additional_build_packages(self):
packages = []
if self.is_cross_compiling:
packages.extend(self.__machine_info.get(
'cross-build-packages', []))
return packages
@property
def arch_triplet(self):
return self.__machine_info['triplet']
@property
def deb_arch(self):
return self.__machine_info['deb']
@property
def kernel_arch(self):
return self.__machine_info['kernel']
@property
def local_plugins_dir(self):
return os.path.join(self.parts_dir, 'plugins')
@property
def parts_dir(self):
return os.path.join(self.__project_dir, 'parts')
@property
def stage_dir(self):
return os.path.join(self.__project_dir, 'stage')
@property
def snap_dir(self):
return os.path.join(self.__project_dir, 'prime')
def __init__(self, use_geoip=False, parallel_builds=True,
target_deb_arch=None):
# TODO: allow setting a different project dir and check for
# snapcraft.yaml
self.__project_dir = os.getcwd()
self.__use_geoip = use_geoip
self.__parallel_builds = parallel_builds
self._set_machine(target_deb_arch)
def _set_machine(self, target_deb_arch):
self.__host_machine = platform.machine()
if not target_deb_arch:
self.__target_machine = self.__host_machine
else:
self.__target_machine = _find_machine(target_deb_arch)
logger.info('Setting target machine to {!r}'.format(
target_deb_arch))
self.__machine_info = _ARCH_TRANSLATIONS[self.__target_machine]
def _find_machine(deb_arch):
for machine in _ARCH_TRANSLATIONS:
if _ARCH_TRANSLATIONS[machine].get('deb', '') == deb_arch:
return machine
raise EnvironmentError(
'Cannot set machine from deb_arch {!r}'.format(deb_arch))
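if __name__ == '__main__':
    # Illustrative check (not part of snapcraft itself): resolving a target
    # deb arch via _find_machine() populates the cross-toolchain metadata
    # from _ARCH_TRANSLATIONS.
    opts = ProjectOptions(target_deb_arch='arm64')
    assert opts.deb_arch == 'arm64'
    assert opts.cross_compiler_prefix == 'aarch64-linux-gnu-'
    assert opts.arch_triplet == 'aarch64-linux-gnu'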
| gpl-3.0 |
svfat/django-docs | setup.py | 2 | 1415 | import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
# read_file() swallows IOError and returns '', so splitlines() never raises
# for a missing requirements.txt -- fall back explicitly on an empty result.
REQUIREMENTS = read_file('requirements.txt').splitlines() or [
    'Django',
]
setup(
name='django-docs',
version=__import__('docs').__version__,
author='Evgeny Demchenko',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='https://github.com/littlepea/django-docs',
license='BSD',
description=u' '.join(__import__('docs').__doc__.splitlines()).strip(),
classifiers=[
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
],
long_description=read_file('README.rst'),
test_suite='docs.tests.runtests.runtests',
tests_require=[
'django-nose',
'coverage',
'django-coverage',
],
zip_safe=False,
install_requires=REQUIREMENTS,
)
| bsd-3-clause |
ChrisBeaumont/luigi | test/date_interval_test.py | 13 | 5774 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from helpers import unittest
import luigi
from luigi.parameter import DateIntervalParameter as DI
class DateIntervalTest(unittest.TestCase):
def test_date(self):
di = DI().parse('2012-01-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 1, 2)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 12, 31)])
self.assertEqual(str(di), '2012-01-01')
def test_month(self):
di = DI().parse('2012-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(31)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 2, 1) + datetime.timedelta(i) for i in range(29)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 12, 1) + datetime.timedelta(i) for i in range(31)])
self.assertEqual(str(di), '2012-01')
def test_year(self):
di = DI().parse('2012')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(366)])
self.assertEqual(di.next().dates(), [datetime.date(2013, 1, 1) + datetime.timedelta(i) for i in range(365)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 1, 1) + datetime.timedelta(i) for i in range(365)])
self.assertEqual(str(di), '2012')
def test_week(self):
# >>> datetime.date(2012, 1, 1).isocalendar()
# (2011, 52, 7)
# >>> datetime.date(2012, 12, 31).isocalendar()
# (2013, 1, 1)
di = DI().parse('2011-W52')
self.assertEqual(di.dates(), [datetime.date(2011, 12, 26) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 1, 2) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(str(di), '2011-W52')
di = DI().parse('2013-W01')
self.assertEqual(di.dates(), [datetime.date(2012, 12, 31) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(di.prev().dates(), [datetime.date(2012, 12, 24) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(str(di), '2013-W01')
def test_interval(self):
di = DI().parse('2012-01-01-2012-02-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(31)])
self.assertRaises(NotImplementedError, di.next)
self.assertRaises(NotImplementedError, di.prev)
self.assertEquals(di.to_string(), '2012-01-01-2012-02-01')
def test_exception(self):
self.assertRaises(ValueError, DI().parse, 'xyz')
def test_comparison(self):
a = DI().parse('2011')
b = DI().parse('2013')
c = DI().parse('2012')
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > c)
d = DI().parse('2012')
self.assertTrue(d == c)
self.assertEqual(d, min(c, b))
self.assertEqual(3, len(set([a, b, c, d])))
def test_comparison_different_types(self):
x = DI().parse('2012')
y = DI().parse('2012-01-01-2013-01-01')
self.assertRaises(TypeError, lambda: x == y)
def test_parameter_parse_and_default(self):
month = luigi.date_interval.Month(2012, 11)
other = luigi.date_interval.Month(2012, 10)
class MyTask(luigi.Task):
di = DI(default=month)
class MyTaskNoDefault(luigi.Task):
di = DI()
task = luigi.interface._ArgParseInterface().parse(["MyTask"])[0]
self.assertEqual(task.di, month)
task = luigi.interface._ArgParseInterface().parse(["MyTask", "--di", "2012-10"])[0]
self.assertEqual(task.di, other)
task = MyTask(month)
self.assertEqual(task.di, month)
task = MyTask(di=month)
self.assertEqual(task.di, month)
task = MyTask(other)
self.assertNotEquals(task.di, month)
def fail1():
luigi.interface._ArgParseInterface().parse(["MyTaskNoDefault"])[0]
self.assertRaises(luigi.parameter.MissingParameterException, fail1)
task = luigi.interface._ArgParseInterface().parse(["MyTaskNoDefault", "--di", "2012-10"])[0]
self.assertEqual(task.di, other)
def test_hours(self):
d = DI().parse('2015')
self.assertEquals(len(list(d.hours())), 24 * 365)
def test_cmp(self):
operators = [lambda x, y: x == y,
lambda x, y: x != y,
lambda x, y: x < y,
lambda x, y: x > y,
lambda x, y: x <= y,
lambda x, y: x >= y]
dates = [(1, 30, DI().parse('2015-01-01-2015-01-30')),
(1, 15, DI().parse('2015-01-01-2015-01-15')),
(10, 20, DI().parse('2015-01-10-2015-01-20')),
(20, 30, DI().parse('2015-01-20-2015-01-30'))]
for from_a, to_a, di_a in dates:
for from_b, to_b, di_b in dates:
for op in operators:
self.assertEquals(
op((from_a, to_a), (from_b, to_b)),
op(di_a, di_b))
| apache-2.0 |
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py | 1 | 7183 | # Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults, ORWTResultsHTMLParser
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class ORWTResultsHTMLParserTest(unittest.TestCase):
_example_results_html = """
<html>
<head>
<title>Layout Test Results</title>
</head>
<body>
<p>Tests that had stderr output:</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
</tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
</tr>
</table><p>Tests that had no expected results (probably new):</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
</tr>
</table></body>
</html>
"""
_example_results_html_with_failing_tests = """
<html>
<head>
<title>Layout Test Results</title>
</head>
<body>
<p>Tests where results did not match expected results:</p>
<table>
<tr>
<td><a href="http://trac.webkit.org/export/91245/trunk/LayoutTests/compositing/plugins/composited-plugin.html">compositing/plugins/composited-plugin.html</a></td>
<td>
<a href="compositing/plugins/composited-plugin-expected.txt">expected</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-actual.txt">actual</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-diffs.txt">diff</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-pretty-diff.html">pretty diff</a>
</td>
</tr>
</table>
<p>Tests that had stderr output:</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
</tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
</tr>
</table><p>Tests that had no expected results (probably new):</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
</tr>
</table></body>
</html>
"""
def test_parse_layout_test_results(self):
failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
expected_results = [test_results.TestResult(testname, failures)]
results = ORWTResultsHTMLParser.parse_results_html(self._example_results_html)
self.assertEqual(expected_results, results)
def test_failures_from_fail_row(self):
row = BeautifulSoup("<tr><td><a>test.hml</a></td><td><a>expected image</a></td><td><a>25%</a></td></tr>")
test_name = unicode(row.find("a").string)
# Even if the caller has already found the test name, findAll inside _failures_from_fail_row will see it again.
failures = OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row])
self.assertEqual(len(failures), 1)
self.assertEqual(type(sorted(failures)[0]), test_failures.FailureImageHashMismatch)
row = BeautifulSoup("<tr><td><a>test.hml</a><a>foo</a></td></tr>")
expected_stderr = "Unhandled link text in results.html parsing: foo. Please file a bug against webkitpy.\n"
OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row], expected_stderr=expected_stderr)
class LayoutTestResultsTest(unittest.TestCase):
def test_set_failure_limit_count(self):
results = LayoutTestResults([])
self.assertEquals(results.failure_limit_count(), None)
results.set_failure_limit_count(10)
self.assertEquals(results.failure_limit_count(), 10)
def test_results_from_string(self):
self.assertEqual(LayoutTestResults.results_from_string(None), None)
self.assertEqual(LayoutTestResults.results_from_string(""), None)
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html)
self.assertEqual(len(results.failing_tests()), 0)
def test_tests_matching_failure_types(self):
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
self.assertEqual(len(results.failing_tests()), 1)
| gpl-3.0 |
Chitrank-Dixit/django-fcm | setup.py | 1 | 1550 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-fcm',
version='0.1.1',
packages=find_packages(),
include_package_data=True,
description='A Django package that enables sending messages using FCM (Firebase Cloud Messaging).',
long_description=README,
url='https://django-fcm.readthedocs.io/en/latest/',
author='Chitrank Dixit',
author_email='[email protected]',
zip_safe=False,
license='MIT License',
platforms=['any'],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
'django>=1.9',
'djangorestframework>=3.3.2',
'pytz>=2015.7',
'requests>=2.9.1'
],
)
| mit |
kennedyshead/home-assistant | tests/components/zha/conftest.py | 2 | 7337 | """Test configuration for the ZHA component."""
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
import zigpy
from zigpy.application import ControllerApplication
import zigpy.config
import zigpy.group
import zigpy.types
from homeassistant.components.zha import DOMAIN
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.setup import async_setup_component
from .common import FakeDevice, FakeEndpoint, get_zha_gateway
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa: F401
FIXTURE_GRP_ID = 0x1001
FIXTURE_GRP_NAME = "fixture group"
@pytest.fixture
def zigpy_app_controller():
"""Zigpy ApplicationController fixture."""
app = MagicMock(spec_set=ControllerApplication)
app.startup = AsyncMock()
app.shutdown = AsyncMock()
groups = zigpy.group.Groups(app)
groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)
app.configure_mock(groups=groups)
type(app).ieee = PropertyMock()
app.ieee.return_value = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")
type(app).nwk = PropertyMock(return_value=zigpy.types.NWK(0x0000))
type(app).devices = PropertyMock(return_value={})
return app
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
"""Fixture representing a config entry."""
entry = MockConfigEntry(
version=2,
domain=zha_const.DOMAIN,
data={
zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
zha_const.CONF_RADIO_TYPE: "ezsp",
},
options={
zha_const.CUSTOM_CONFIGURATION: {
zha_const.ZHA_ALARM_OPTIONS: {
zha_const.CONF_ALARM_ARM_REQUIRES_CODE: False,
zha_const.CONF_ALARM_MASTER_CODE: "4321",
zha_const.CONF_ALARM_FAILED_TRIES: 2,
}
}
},
)
entry.add_to_hass(hass)
return entry
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
"""Set up ZHA component."""
zha_config = {zha_const.CONF_ENABLE_QUIRKS: False}
p1 = patch(
"bellows.zigbee.application.ControllerApplication.new",
return_value=zigpy_app_controller,
)
async def _setup(config=None):
config = config or {}
with p1:
status = await async_setup_component(
hass, zha_const.DOMAIN, {zha_const.DOMAIN: {**zha_config, **config}}
)
assert status is True
await hass.async_block_till_done()
return _setup
@pytest.fixture
def channel():
"""Channel mock factory fixture."""
def channel(name: str, cluster_id: int, endpoint_id: int = 1):
ch = MagicMock()
ch.name = name
ch.generic_id = f"channel_0x{cluster_id:04x}"
ch.id = f"{endpoint_id}:0x{cluster_id:04x}"
ch.async_configure = AsyncMock()
ch.async_initialize = AsyncMock()
return ch
return channel
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
"""Make a fake device using the specified cluster classes."""
def _mock_dev(
endpoints,
ieee="00:0d:6f:00:0a:90:69:e7",
manufacturer="FakeManufacturer",
model="FakeModel",
node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
nwk=0xB79C,
patch_cluster=True,
):
"""Make a fake device using the specified cluster classes."""
device = FakeDevice(
zigpy_app_controller, ieee, manufacturer, model, node_descriptor, nwk=nwk
)
for epid, ep in endpoints.items():
endpoint = FakeEndpoint(manufacturer, model, epid)
endpoint.device = device
device.endpoints[epid] = endpoint
endpoint.device_type = ep["device_type"]
profile_id = ep.get("profile_id")
if profile_id:
endpoint.profile_id = profile_id
for cluster_id in ep.get("in_clusters", []):
endpoint.add_input_cluster(cluster_id, _patch_cluster=patch_cluster)
for cluster_id in ep.get("out_clusters", []):
endpoint.add_output_cluster(cluster_id, _patch_cluster=patch_cluster)
return device
return _mock_dev
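# Hypothetical usage from a test module: the factory above builds a fake
# single-endpoint on/off device without touching real zigbee hardware.
#
#   def test_fake_switch(zigpy_device_mock):
#       dev = zigpy_device_mock({
#           1: {"in_clusters": [0, 6], "out_clusters": [], "device_type": 0x0100}
#       })
#       assert 1 in dev.endpoints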
@pytest.fixture
def zha_device_joined(hass, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_dev)
await hass.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha, hass_storage):
"""Return a restored ZHA device."""
async def _zha_device(zigpy_dev, last_seen=None):
zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev
if last_seen is not None:
hass_storage[f"{DOMAIN}.storage"] = {
"key": f"{DOMAIN}.storage",
"version": 1,
"data": {
"devices": [
{
"ieee": str(zigpy_dev.ieee),
"last_seen": last_seen,
"name": f"{zigpy_dev.manufacturer} {zigpy_dev.model}",
}
],
},
}
await setup_zha()
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
"""Join or restore ZHA device."""
named_method = request.getfixturevalue(request.param)
named_method.name = request.param
return named_method
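# Hypothetical consumer of the parametrized fixture above: the same test
# body runs twice, once with a freshly-joined device and once with a
# restored one, and may branch on the fixture's injected name.
#
#   async def test_lifecycle(zigpy_device_mock, zha_device_joined_restored):
#       zha_dev = await zha_device_joined_restored(zigpy_device_mock({...}))
#       if zha_device_joined_restored.name == "zha_device_restored":
#           ...  # restore-only assertions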
@pytest.fixture
def zha_device_mock(hass, zigpy_device_mock):
"""Return a zha Device factory."""
def _zha_device(
endpoints=None,
ieee="00:11:22:33:44:55:66:77",
manufacturer="mock manufacturer",
model="mock model",
node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
patch_cluster=True,
):
if endpoints is None:
endpoints = {
1: {
"in_clusters": [0, 1, 8, 768],
"out_clusters": [0x19],
"device_type": 0x0105,
},
2: {
"in_clusters": [0],
"out_clusters": [6, 8, 0x19, 768],
"device_type": 0x0810,
},
}
zigpy_device = zigpy_device_mock(
endpoints, ieee, manufacturer, model, node_desc, patch_cluster=patch_cluster
)
zha_device = zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())
return zha_device
return _zha_device
@pytest.fixture
def hass_disable_services(hass):
"""Mock service register."""
with patch.object(hass.services, "async_register"), patch.object(
hass.services, "has_service", return_value=True
):
yield hass
| apache-2.0 |
klynch/emacs.d-old | python-libs/ropemode/refactor.py | 23 | 15727 | import re
import rope.base.change
import rope.contrib.generate
import rope.refactor.change_signature
import rope.refactor.extract
import rope.refactor.inline
import rope.refactor.introduce_factory
import rope.refactor.method_object
import rope.refactor.move
import rope.refactor.rename
import rope.refactor.restructure
import rope.refactor.usefunction
from rope.base import taskhandle
from ropemode import dialog, filter
class Refactoring(object):
key = None
confs = {}
optionals = {}
saveall = True
def __init__(self, interface, env):
self.interface = interface
self.env = env
def show(self, initial_asking=True):
self.interface._check_project()
self.interface._save_buffers(only_current=not self.saveall)
self._create_refactoring()
action, result = dialog.show_dialog(
self.interface._askdata, ['perform', 'preview', 'cancel'],
self._get_confs(), self._get_optionals(),
initial_asking=initial_asking)
if action == 'cancel':
self.env.message('Cancelled!')
return
def calculate(handle):
return self._calculate_changes(result, handle)
name = 'Calculating %s changes' % self.name
changes = runtask(self.env, calculate, name=name)
if action == 'perform':
self._perform(changes)
if action == 'preview':
if changes is not None:
diffs = changes.get_description()
if self.env.preview_changes(diffs):
self._perform(changes)
else:
self.env.message('Thrown away!')
else:
self.env.message('No changes!')
@property
def project(self):
return self.interface.project
@property
def resource(self):
return self.interface._get_resource()
@property
def offset(self):
return self.env.get_offset()
@property
def region(self):
return self.env.get_region()
@property
def name(self):
return refactoring_name(self.__class__)
def _calculate_changes(self, option_values, task_handle):
pass
def _create_refactoring(self):
pass
def _done(self):
pass
def _perform(self, changes):
if changes is None:
self.env.message('No changes!')
return
def perform(handle, self=self, changes=changes):
self.project.do(changes, task_handle=handle)
self.interface._reload_buffers(changes)
self._done()
runtask(self.env, perform, 'Making %s changes' % self.name,
interrupts=False)
self.env.message(str(changes.description) + ' finished')
def _get_confs(self):
return self.confs
def _get_optionals(self):
return self.optionals
@property
def resources_option(self):
return dialog.Data('Files to apply this refactoring on: ',
decode=self._decode_resources)
def _decode_resources(self, value):
return _resources(self.project, value)
class Rename(Refactoring):
key = 'r'
saveall = True
def _create_refactoring(self):
self.renamer = rope.refactor.rename.Rename(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.renamer.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
opts = {}
opts['docs'] = dialog.Boolean('Search comments and docs: ', True)
if self.renamer.is_method():
opts['in_hierarchy'] = dialog.Boolean('Rename methods in '
'class hierarchy: ')
opts['resources'] = self.resources_option
opts['unsure'] = dialog.Data('Unsure occurrences: ',
decode=self._decode_unsure,
values=['ignore', 'match'],
default='ignore')
return opts
def _get_confs(self):
oldname = str(self.renamer.get_old_name())
return {'new_name': dialog.Data('New name: ', default=oldname)}
def _decode_unsure(self, value):
unsure = value == 'match'
return lambda occurrence: unsure
class RenameCurrentModule(Rename):
key = '1 r'
offset = None
class Restructure(Refactoring):
key = 'x'
confs = {'pattern': dialog.Data('Restructuring pattern: '),
'goal': dialog.Data('Restructuring goal: ')}
def _calculate_changes(self, values, task_handle):
restructuring = rope.refactor.restructure.Restructure(
self.project, values['pattern'], values['goal'],
args=values['args'], imports=values['imports'])
return restructuring.get_changes(resources=values['resources'],
task_handle=task_handle)
def _get_optionals(self):
return {
'args': dialog.Data('Arguments: ', decode=self._decode_args),
'imports': dialog.Data('Imports: ', decode=self._decode_imports),
'resources': self.resources_option}
def _decode_args(self, value):
if value:
args = {}
for raw_check in value.split('\n'):
if raw_check:
key, value = raw_check.split(':', 1)
args[key.strip()] = value.strip()
return args
def _decode_imports(self, value):
if value:
return [line.strip() for line in value.split('\n')]
class UseFunction(Refactoring):
key = 'u'
def _create_refactoring(self):
self.user = rope.refactor.usefunction.UseFunction(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.user.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
return {'resources': self.resources_option}
class Move(Refactoring):
key = 'v'
def _create_refactoring(self):
self.mover = rope.refactor.move.create_move(self.project,
self.resource,
self.offset)
def _calculate_changes(self, values, task_handle):
destination = values['destination']
resources = values.get('resources', None)
if isinstance(self.mover, rope.refactor.move.MoveGlobal):
return self._move_global(destination, resources, task_handle)
if isinstance(self.mover, rope.refactor.move.MoveModule):
return self._move_module(destination, resources, task_handle)
if isinstance(self.mover, rope.refactor.move.MoveMethod):
return self._move_method(destination, resources, task_handle)
def _move_global(self, dest, resources, handle):
destination = self.project.pycore.find_module(dest)
return self.mover.get_changes(
destination, resources=resources, task_handle=handle)
def _move_method(self, dest, resources, handle):
return self.mover.get_changes(
dest, self.mover.get_method_name(),
resources=resources, task_handle=handle)
def _move_module(self, dest, resources, handle):
destination = self.project.pycore.find_module(dest)
return self.mover.get_changes(
destination, resources=resources, task_handle=handle)
def _get_confs(self):
if isinstance(self.mover, rope.refactor.move.MoveGlobal):
prompt = 'Destination module: '
if isinstance(self.mover, rope.refactor.move.MoveModule):
prompt = 'Destination package: '
if isinstance(self.mover, rope.refactor.move.MoveMethod):
prompt = 'Destination attribute: '
return {'destination': dialog.Data(prompt)}
def _get_optionals(self):
return {'resources': self.resources_option}
class MoveCurrentModule(Move):
key = '1 v'
offset = None
class ModuleToPackage(Refactoring):
key = '1 p'
saveall = False
def _create_refactoring(self):
self.packager = rope.refactor.ModuleToPackage(
self.project, self.resource)
def _calculate_changes(self, values, task_handle):
return self.packager.get_changes()
class Inline(Refactoring):
key = 'i'
def _create_refactoring(self):
self.inliner = rope.refactor.inline.create_inline(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.inliner.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
opts = {'resources': self.resources_option}
if self.inliner.get_kind() == 'parameter':
opts['in_hierarchy'] = dialog.Boolean(
'Apply on all matching methods in class hierarchy: ', False)
else:
opts['remove'] = dialog.Boolean('Remove the definition: ', True)
opts['only_current'] = dialog.Boolean('Inline this '
'occurrence only: ')
return opts
class _Extract(Refactoring):
saveall = False
optionals = {'similar': dialog.Boolean('Extract similar pieces: ', True),
'global_': dialog.Boolean('Make global: ')}
kind = None
constructor = None
def _create_refactoring(self):
start, end = self.region
self.extractor = self.constructor(self.project,
self.resource, start, end)
def _calculate_changes(self, values, task_handle):
similar = values.get('similar')
global_ = values.get('global_')
return self.extractor.get_changes(values['name'], similar=similar,
global_=global_)
def _get_confs(self):
return {'name': dialog.Data('Extracted %s name: ' % self.kind)}
class ExtractVariable(_Extract):
key = 'l'
kind = 'variable'
constructor = rope.refactor.extract.ExtractVariable
class ExtractMethod(_Extract):
key = 'm'
kind = 'method'
constructor = rope.refactor.extract.ExtractMethod
class OrganizeImports(Refactoring):
key = 'o'
saveall = False
def _create_refactoring(self):
self.organizer = rope.refactor.ImportOrganizer(self.project)
def _calculate_changes(self, values, task_handle):
return self.organizer.organize_imports(self.resource)
class MethodObject(Refactoring):
saveall = False
confs = {'classname': dialog.Data('New class name: ',
default='_ExtractedClass')}
def _create_refactoring(self):
self.objecter = rope.refactor.method_object.MethodObject(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
classname = values.get('classname')
return self.objecter.get_changes(classname)
class IntroduceFactory(Refactoring):
saveall = True
key = 'f'
def _create_refactoring(self):
self.factory = rope.refactor.introduce_factory.IntroduceFactory(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.factory.get_changes(task_handle=task_handle, **values)
def _get_confs(self):
default = 'create_%s' % self.factory.old_name.lower()
return {'factory_name': dialog.Data('Factory name: ', default)}
def _get_optionals(self):
return {'global_factory': dialog.Boolean('Make global: ', True),
'resources': self.resources_option}
class ChangeSignature(Refactoring):
saveall = True
key = 's'
def _create_refactoring(self):
self.changer = rope.refactor.change_signature.ChangeSignature(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
signature = values.get('signature')
args = re.sub(r'[\s\(\)]+', '', signature).split(',')
olds = [arg[0] for arg in self._get_args()]
changers = []
for arg in list(olds):
if arg in args:
continue
changers.append(rope.refactor.change_signature.
ArgumentRemover(olds.index(arg)))
olds.remove(arg)
order = []
for index, arg in enumerate(args):
if arg not in olds:
changers.append(rope.refactor.change_signature.
ArgumentAdder(index, arg))
olds.insert(index, arg)
order.append(olds.index(arg))
changers.append(rope.refactor.change_signature.
ArgumentReorderer(order, autodef='None'))
del values['signature']
return self.changer.get_changes(changers, task_handle=task_handle,
**values)
def _get_args(self):
if hasattr(self.changer, 'get_args'):
return self.changer.get_args()
return self.changer.get_definition_info().args_with_defaults
def _get_confs(self):
args = []
for arg, default in self._get_args():
args.append(arg)
signature = '(' + ', '.join(args) + ')'
return {'signature': dialog.Data('Change the signature: ',
default=signature)}
def _get_optionals(self):
opts = {'resources': self.resources_option}
if self.changer.is_method():
opts['in_hierarchy'] = dialog.Boolean('Rename methods in '
'class hierarchy: ')
return opts
class _GenerateElement(Refactoring):
def _create_refactoring(self):
kind = self.name.split('_')[-1]
self.generator = rope.contrib.generate.create_generate(
kind, self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.generator.get_changes()
def _done(self):
resource, lineno = self.generator.get_location()
self.interface._goto_location(resource, lineno)
class GenerateVariable(_GenerateElement):
key = 'n v'
class GenerateFunction(_GenerateElement):
key = 'n f'
class GenerateClass(_GenerateElement):
key = 'n c'
class GenerateModule(_GenerateElement):
key = 'n m'
class GeneratePackage(_GenerateElement):
key = 'n p'
def refactoring_name(refactoring):
classname = refactoring.__name__
result = []
for c in classname:
if result and c.isupper():
result.append('_')
result.append(c.lower())
name = ''.join(result)
return name
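# Worked example of the CamelCase -> snake_case mapping performed above:
#
#   refactoring_name(ExtractMethod) == 'extract_method'
#   refactoring_name(ModuleToPackage) == 'module_to_package'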
def _resources(project, text):
if text is None or text.strip() == '':
return None
return filter.resources(project, text)
def runtask(env, command, name, interrupts=True):
return RunTask(env, command, name, interrupts)()
class RunTask(object):
def __init__(self, env, task, name, interrupts=True):
self.env = env
self.task = task
self.name = name
self.interrupts = interrupts
def __call__(self):
handle = taskhandle.TaskHandle(name=self.name)
progress = self.env.create_progress(self.name)
def update_progress():
jobset = handle.current_jobset()
if jobset:
percent = jobset.get_percent_done()
if percent is not None:
progress.update(percent)
handle.add_observer(update_progress)
result = self.task(handle)
progress.done()
return result
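# Minimal usage sketch (env is whatever ropemode environment object the
# caller already has, and renamer a hypothetical rope Rename instance):
# RunTask wires a rope TaskHandle's jobset observer into the environment's
# progress bar, so a slow refactoring reports its percent-done as it runs.
#
#   def calculate(handle):
#       return renamer.get_changes('new_name', task_handle=handle)
#
#   changes = runtask(env, calculate, name='Calculating rename changes')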
| gpl-3.0 |
BradAJ/zipflights | web_app/app/models.py | 1 | 5577 | from app import *
def ita_search(faa_orig, faa_dest, start_date, end_date, duration = None, out_constraints = None, return_constraints = None, month_search = True):
"""
faa_orig, faa_dest: FAA airport code strs e.g. 'SFO'
start_date, end_date: datetime objs e.g. datetime.date.today(), datetime.date(2015, 2, 28)
NOTE: start_date is used as departure date if NOT month_search, similarly for end_date
    duration: int number of nights at destination e.g. 7. If None => one-way flight.
        NOTE: pass duration=True for specificDates roundtrips, where the return
        date is taken from end_date rather than from a layover window.
out/return_constraints: ITA flags e.g. 'N' for nonstops, 'ORD' to transfer there, or 'UA+' for 1 or more United flights.
"""
search_url = 'http://matrix.itasoftware.com/xhr/shop/search'
payload_d = {"pax":{"adults":1},"cabin":"COACH","changeOfAirport":False,"checkAvailability":True,"firstDayOfWeek":"SUNDAY"}
trip_slice = {"originPreferCity":False,"destinationPreferCity":False, "isArrivalDate":False}
def apt_code_parser(codes_in):
return [codes_in] if type(codes_in) is not list else codes_in
outbound_d = trip_slice.copy()
outbound_d['origins'] = apt_code_parser(faa_orig)
outbound_d['destinations'] = apt_code_parser(faa_dest)
if out_constraints is not None:
outbound_d['routeLanguage'] = out_constraints
if month_search:
search_type = 'calendar&summarizers=itineraryCarrierList%2Ccalendar'
payload_d['startDate'] = start_date
payload_d['endDate'] = end_date
payload_d['layover'] = {"max":duration, "min":duration}
else:
search_type = 'specificDates&summarizers=solutionList%2CitineraryCarrierList%2CitineraryOrigins%2CitineraryDestinations'
outbound_d['date'] = start_date
outbound_d['dateModifier'] = {"minus":0, "plus":0}
if duration is not None:
return_d = trip_slice.copy()
return_d['origins'] = apt_code_parser(faa_dest)
return_d['destinations'] = apt_code_parser(faa_orig)
if return_constraints is not None:
return_d['routeLanguage'] = return_constraints
if not month_search:
return_d['date'] = end_date
return_d['dateModifier'] = {"minus":0, "plus":0}
payload_d['slices'] = [outbound_d, return_d]
else:
payload_d['slices'] = [outbound_d]
payload = urllib.quote_plus(json.dumps(payload_d))
url_start_search = 'http://matrix.itasoftware.com/xhr/shop/search?name='
return requests.post(url_start_search + search_type + '&format=JSON&inputs=' + payload)
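# Example call (hypothetical route and dates; assumes the ITA Matrix XHR
# endpoint above still accepts this payload). Dates are 'YYYY-MM-DD'
# strings -- date_obj_to_s() below converts datetime.date objects.
#
#   resp = ita_search('SFO', 'JFK', '2015-03-01', '2015-03-31',
#                     duration=7, out_constraints='N', return_constraints='N')
#   price, airlines = ita_response_airline_parse(resp)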
def ita_response_airline_parse(response):
    airline_fares = ita_response_d(response)['result']['itineraryCarrierList']['groups']
    route_price = None  # stays None if no fare in the summary carries a price
    airlines = []
    for fare in airline_fares:
        if 'minPriceInSummary' in fare:
            route_price = fare['minPrice']
            airlines.append(fare['label']['shortName'])
    return route_price, airlines
def ita_response_hidden_parse(response, faa_orig, faa_dest):
resp_d = ita_response_d(response)
flights_d = dict()
minprice = float(resp_d['result']['solutionList']['minPrice'].strip('USD'))
flights_d['minprice'] = minprice
for itin in resp_d['result']['solutionList']['solutions']:
flightprice = float(itin['displayTotal'].strip('USD'))
if flightprice <= (minprice + 1.0): #fixes sensitivity to cents.
for slic in itin['itinerary']['slices']:
flight = slic['flights'][0] #only interested in first flight here!
if flight not in flights_d:
result_d = dict()
result_d['carrier'] = itin['itinerary']['ext']['dominantCarrier']['shortName']
result_d['departing'] = slic['departure']
result_d['fake_dest'] = slic['destination']['code']
result_d['true_orig'] = slic['origin']['code']
if 'stops' in slic:
result_d['stops'] = slic['stops'][0]['code'] #Looking for non-stops only!
flights_d[flight] = result_d
flights_d['out_flights'] = set()
flights_d['back_flights'] = set()
flights_d['carriers'] = set()
for key in flights_d:
if type(flights_d[key]) is dict and 'true_orig' in flights_d[key]:
if faa_orig == flights_d[key]['true_orig']:
flights_d['out_flights'].add(key)
flights_d['carriers'].add(flights_d[key]['carrier'])
elif faa_dest == flights_d[key]['true_orig']:
flights_d['back_flights'].add(key)
flights_d['carriers'].add(flights_d[key]['carrier'])
flights_d['out_flights'] = sorted(list(flights_d['out_flights'])) if len(flights_d['out_flights']) != 0 else None
flights_d['back_flights'] = sorted(list(flights_d['back_flights'])) if len(flights_d['back_flights']) != 0 else None
return flights_d
def ita_response_d(response):
return json.loads(response.content[4:])
def date_obj_to_s(date_obj):
y = str(date_obj.year)
m = '0' + str(date_obj.month) if date_obj.month < 10 else str(date_obj.month)
d = '0' + str(date_obj.day) if date_obj.day < 10 else str(date_obj.day)
return y + '-' + m + '-' + d
def stars_from_price(delta_p, distrib_mean, distrib_std):
z_score = (delta_p - distrib_mean) / (1.0 * distrib_std)
if abs(z_score) <= (1./3.):
return 3
elif abs(z_score) < (4./3.):
return 3 + (1 if z_score >= 0 else -1)
else:
return 3 + (2 if z_score >= 0 else -2)
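if __name__ == '__main__':
    # Illustrative smoke test of the star buckets (mean=50, std=30 are
    # made-up distribution parameters, not real zipflights data).
    assert stars_from_price(55, 50, 30) == 3    # z = +0.17, within 1/3 sigma
    assert stars_from_price(20, 50, 30) == 2    # z = -1.00, within 4/3 sigma
    assert stars_from_price(100, 50, 30) == 5   # z = +1.67, beyond 4/3 sigma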
| mit |
onceuponatimeforever/oh-mainline | vendor/packages/Django/django/utils/safestring.py | 208 | 4185 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import curry, Promise
from django.utils import six
class EscapeData(object):
pass
class EscapeBytes(bytes, EscapeData):
"""
A byte string that should be HTML-escaped when output.
"""
pass
class EscapeText(six.text_type, EscapeData):
"""
A unicode string object that should be HTML-escaped when output.
"""
pass
if six.PY3:
EscapeString = EscapeText
else:
EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
pass
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
"""
A unicode (Python 2) / str (Python 3) subclass that has been specifically
marked as "safe" for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
encode = curry(_proxy_method, method=six.text_type.encode)
if six.PY3:
SafeString = SafeText
else:
SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if isinstance(s, SafeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s))
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
"""
if isinstance(s, (SafeData, EscapeData)):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return EscapeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return EscapeText(s)
return EscapeBytes(bytes(s))
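# Behavioural sketch of the two markers above:
#
#   s = mark_safe('<b>bold</b>')        # SafeText: rendered without escaping
#   assert mark_safe(s) is s            # idempotent on already-safe data
#   e = mark_for_escaping('<b>x</b>')   # EscapeText: escaped exactly once
#   assert mark_for_escaping(e) is e    # repeated calls don't double-escape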
| agpl-3.0 |
vadimtk/chrome4sdp | native_client_sdk/src/build_tools/nacl-mono-builder.py | 51 | 5243 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import tarfile
import buildbot_common
from build_paths import SCRIPT_DIR
SDK_BUILD_DIR = SCRIPT_DIR
MONO_BUILD_DIR = os.path.join(SDK_BUILD_DIR, 'mono_build')
MONO_DIR = os.path.join(MONO_BUILD_DIR, 'nacl-mono')
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--arch',
help='Target architecture',
dest='arch',
default='x86-32')
parser.add_argument('--sdk-revision',
help='SDK Revision'
' (default=buildbot revision)',
dest='sdk_revision',
default=None)
parser.add_argument('--sdk-url',
help='SDK Download URL',
dest='sdk_url',
default=None)
parser.add_argument('--install-dir',
help='Install Directory',
dest='install_dir',
default='naclmono')
options = parser.parse_args(args)
assert sys.platform.find('linux') != -1
buildbot_revision = os.environ.get('BUILDBOT_REVISION', '')
build_prefix = options.arch + ' '
buildbot_common.BuildStep(build_prefix + 'Clean Old SDK')
buildbot_common.MakeDir(MONO_BUILD_DIR)
buildbot_common.RemoveDir(os.path.join(MONO_BUILD_DIR, 'pepper_*'))
buildbot_common.BuildStep(build_prefix + 'Setup New SDK')
sdk_dir = None
sdk_revision = options.sdk_revision
sdk_url = options.sdk_url
if not sdk_url:
if not sdk_revision:
assert buildbot_revision
sdk_revision = buildbot_revision.split(':')[0]
sdk_url = 'gs://nativeclient-mirror/nacl/nacl_sdk/'\
'trunk.%s/naclsdk_linux.tar.bz2' % sdk_revision
sdk_url = sdk_url.replace('https://storage.googleapis.com/', 'gs://')
sdk_file = sdk_url.split('/')[-1]
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', sdk_url, sdk_file],
cwd=MONO_BUILD_DIR)
tar_file = None
try:
tar_file = tarfile.open(os.path.join(MONO_BUILD_DIR, sdk_file))
pepper_dir = os.path.commonprefix(tar_file.getnames())
tar_file.extractall(path=MONO_BUILD_DIR)
sdk_dir = os.path.join(MONO_BUILD_DIR, pepper_dir)
finally:
if tar_file:
tar_file.close()
assert sdk_dir
buildbot_common.BuildStep(build_prefix + 'Checkout Mono')
# TODO(elijahtaylor): Get git URL from master/trigger to make this
# more flexible for building from upstream and release branches.
if options.arch == 'arm':
git_url = 'git://github.com/igotti-google/mono.git'
git_rev = 'arm_nacl'
else:
git_url = 'git://github.com/elijahtaylor/mono.git'
git_rev = 'HEAD'
if buildbot_revision:
# Unfortunately, we use different git branches/revisions
# for ARM and x86 now, so ignore buildbot_revision variable for ARM.
# Need to rethink this approach, if we'll plan to support
# more flexible repo selection mechanism.
if options.arch != 'arm':
git_rev = buildbot_revision.split(':')[1]
# ARM and x86 is built out of different git trees, so distinguish
# them by appending the arch. It also makes 32 and 64 bit x86 separated,
# which is good.
# TODO(olonho): maybe we need to avoid modifications of global.
global MONO_DIR
tag = options.arch
MONO_DIR = "%s-%s" % (MONO_DIR, tag)
if not os.path.exists(MONO_DIR):
buildbot_common.MakeDir(MONO_DIR)
buildbot_common.Run(['git', 'clone', git_url, MONO_DIR])
else:
buildbot_common.Run(['git', 'fetch'], cwd=MONO_DIR)
if git_rev:
buildbot_common.Run(['git', 'checkout', git_rev], cwd=MONO_DIR)
arch_to_bitsize = {'x86-32': '32',
'x86-64': '64',
'arm': 'arm'}
arch_to_output_folder = {'x86-32': 'runtime-x86-32-build',
'x86-64': 'runtime-x86-64-build',
'arm': 'runtime-arm-build'}
buildbot_common.BuildStep(build_prefix + 'Configure Mono')
os.environ['NACL_SDK_ROOT'] = sdk_dir
os.environ['TARGET_ARCH'] = options.arch
os.environ['TARGET_BITSIZE'] = arch_to_bitsize[options.arch]
buildbot_common.Run(['./autogen.sh'], cwd=MONO_DIR)
buildbot_common.Run(['make', 'distclean'], cwd=MONO_DIR)
buildbot_common.BuildStep(build_prefix + 'Build and Install Mono')
nacl_interp_script = os.path.join(SDK_BUILD_DIR, 'nacl_interp_loader_mono.sh')
os.environ['NACL_INTERP_LOADER'] = nacl_interp_script
buildbot_common.Run(['./nacl-mono-runtime.sh',
MONO_DIR, # Mono directory with 'configure'
arch_to_output_folder[options.arch], # Build dir
options.install_dir],
cwd=SDK_BUILD_DIR)
# TODO(elijahtaylor,olonho): Re-enable tests on arm when they compile/run.
if options.arch != 'arm':
buildbot_common.BuildStep(build_prefix + 'Test Mono')
buildbot_common.Run(['make', 'check', '-j8'],
cwd=os.path.join(SDK_BUILD_DIR, arch_to_output_folder[options.arch]))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/pymongo/collection.py | 15 | 113009 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import collections
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor
from pymongo.collation import validate_collation_or_none
from pymongo.cursor import Cursor
from pymongo.errors import ConfigurationError, InvalidName, OperationFailure
from pymongo.helpers import _check_write_command_response
from pymongo.helpers import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import _WriteOp, IndexModel
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
try:
from collections import OrderedDict
_ORDERED_TYPES = (SON, OrderedDict)
except ImportError:
_ORDERED_TYPES = (SON,)
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
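# e.g. _UJOIN % (u"db", u"coll") -> u"db.coll", the fully qualified namespace.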
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
**kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent. Otherwise, a ``create`` command will not be sent and the
collection will be created implicitly on first use.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
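    # Typical ways to obtain a Collection (illustrative sketch; assumes 'db'
    # is a pymongo Database instance):
    #
    #     coll = db.my_collection              # attribute access
    #     coll = db['my_collection']           # dict-style access
    #     coll = Collection(db, 'my_collection', create=True)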
def _socket_for_reads(self):
return self.__database.client._socket_for_reads(self.read_preference)
def _socket_for_primary_reads(self):
return self.__database.client._socket_for_reads(ReadPreference.PRIMARY)
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=DEFAULT_READ_CONCERN,
write_concern=None,
parse_write_concern_error=False,
collation=None):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `parse_write_concern_error` (optional): Whether to parse a
``writeConcernError`` field in the command response.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
:Returns:
# todo: don't return address
(result document, address of server the command was run on)
"""
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self.read_preference,
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation)
def __create(self, options, collation):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self.write_concern,
parse_write_concern_error=True,
collation=collation)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database, _UJOIN % (self.__name, name))
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, True, bypass_document_validation)
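    # Usage sketch for the builder returned above (illustrative; assumes
    # 'coll' is a Collection instance):
    #
    #     bulk = coll.initialize_ordered_bulk_op()
    #     bulk.insert({'a': 1})
    #     bulk.find({'a': 1}).update({'$set': {'b': 2}})
    #     result = bulk.execute()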
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(requests, list):
raise TypeError("requests must be a list")
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
if not isinstance(request, _WriteOp):
raise TypeError("%r is not a valid request" % (request,))
request._add_to_bulk(blk)
bulk_api_result = blk.execute(self.write_concern.document)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, acknowledged, op_id,
bypass_doc_val, func, *args):
"""Internal legacy write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if (bypass_doc_val and not acknowledged and
sock_info.max_wire_version >= 4):
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
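        # func is one of message.insert/update/delete; it encodes the legacy
        # wire-protocol request and returns its request id, the raw message
        # bytes, and the size of the largest encoded document.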
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(
rqst_id, msg, max_size, acknowledged)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, sock_info, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# Insert command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options,
check_keys=check_keys)
_check_write_command_response([(0, result)])
else:
# Legacy OP_INSERT.
self._legacy_write(
sock_info, 'insert', command, acknowledged, op_id,
bypass_doc_val, message.insert, self.__full_name, [doc],
check_keys, acknowledged, concern, False,
self.__write_response_codec_options)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, sock_info, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False):
"""Internal insert helper."""
if isinstance(docs, collections.Mapping):
return self._insert_one(
sock_info, docs, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered)])
if concern:
command['writeConcern'] = concern
if op_id is None:
op_id = message._randint()
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
bwc = message._BulkWriteContext(
self.database.name, command, sock_info, op_id,
self.database.client._event_listeners)
if sock_info.max_wire_version > 1 and acknowledged:
# Batched insert command.
results = message._do_batched_write_command(
self.database.name + ".$cmd", message._INSERT, command,
gen(), check_keys, self.__write_response_codec_options, bwc)
_check_write_command_response(results)
else:
# Legacy batched OP_INSERT.
message._do_batched_insert(self.__full_name, gen(), check_keys,
acknowledged, concern, not ordered,
self.__write_response_codec_options, bwc)
return ids
def insert_one(self, document, bypass_document_validation=False):
"""Insert a single document.
>>> db.test.count({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
with self._socket_for_writes() as sock_info:
return InsertOneResult(
self._insert(sock_info, document,
bypass_doc_val=bypass_document_validation),
self.write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False):
"""Insert an iterable of documents.
>>> db.test.count()
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count()
2
:Parameters:
          - `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(self.write_concern.document)
return InsertManyResult(inserted_ids, self.write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options).copy()
_check_write_command_response([(0, result)])
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
return result
else:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, acknowledged, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, acknowledged, concern, check_keys,
self.__write_response_codec_options)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, replacement, upsert,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_many(self, filter, update, upsert=False,
bypass_document_validation=False, collation=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False, multi=True,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def drop(self):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
"""
self.__database.drop_collection(self.__name)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Delete command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options)
_check_write_command_response([(0, result)])
return result
else:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, acknowledged, op_id,
False, message.delete, self.__full_name, criteria,
acknowledged, concern, self.__write_response_codec_options,
int(not multi))
def delete_one(self, filter, collation=None):
"""Delete a single document matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, False,
collation=collation),
self.write_concern.acknowledged)
def delete_many(self, filter, collation=None):
"""Delete one or more documents matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, True,
collation=collation),
self.write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
- `max_time_ms` (optional): a value for max_time_ms may be
specified as part of `**kwargs`, e.g.
>>> find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, collections.Mapping)):
filter = {"_id": filter}
max_time_ms = kwargs.pop("max_time_ms", None)
cursor = self.find(filter,
*args, **kwargs).max_time_ms(max_time_ms)
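        # A negative limit asks the server for at most that many documents
        # in a single batch and then closes the cursor, so limit(-1)
        # returns at most one document.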
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `modifiers` (optional): A dict specifying the MongoDB `query
modifiers`_ that should be used for this query. For example::
>>> db.test.find(modifiers={"$maxTimeMS": 500})
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. _query modifiers:
http://docs.mongodb.org/manual/reference/operator/query-modifier/
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, **kwargs):
"""Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)
return [CommandCursor(self, cursor['cursor'], sock_info.address)
for cursor in result['cursors']]
def _count(self, cmd, collation=None):
"""Internal count helper."""
with self._socket_for_reads() as (sock_info, slave_ok):
res = self._command(
sock_info, cmd, slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def count(self, filter=None, **kwargs):
"""Get the number of documents in this collection.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
- `limit` (int): The maximum number of documents to count.
- `skip` (int): The number of matching documents to skip before
returning results.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.4
Support the `collation` option.
"""
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation)
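    # Usage sketch (illustrative):
    #
    #     total = db.test.count()
    #     matching = db.test.count({'x': 1}, maxTimeMS=1000)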
def create_indexes(self, indexes):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
.. note:: `create_indexes` uses the ``createIndexes`` command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
"""
if not isinstance(indexes, list):
raise TypeError("indexes must be a list")
names = []
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError("%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self.write_concern,
parse_write_concern_error=True)
return names
def __create_index(self, keys, index_options):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes() as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
try:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self.write_concern,
parse_write_concern_error=True)
except OperationFailure as exc:
if exc.code in common.COMMAND_NOT_FOUND_CODES:
index["ns"] = self.__full_name
wcn = (self.write_concern if
self.write_concern.acknowledged else WriteConcern())
self.__database.system.indexes._insert(
sock_info, index, True, False, False, wcn)
else:
raise
def create_index(self, keys, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
self.__create_index(keys, kwargs)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self):
"""Drops all indexes on this collection.
        Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*")
def drop_index(self, index_or_name):
"""Drops the specified index on this collection.
        Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"],
write_concern=self.write_concern,
parse_write_concern_error=True)
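    # Usage sketch (illustrative):
    #
    #     coll.drop_index("x_1")                       # by name
    #     coll.drop_index([("x", pymongo.ASCENDING)])  # by specifier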
def reindex(self):
"""Rebuilds all indexes on this collection.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
cmd = SON([("reIndex", self.__name)])
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self.write_concern,
parse_write_concern_error=True)
def list_indexes(self):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options)
with self._socket_for_primary_reads() as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
cursor = self._command(sock_info, cmd, slave_ok,
ReadPreference.PRIMARY,
codec_options)["cursor"]
return CommandCursor(coll, cursor, sock_info.address)
else:
namespace = _UJOIN % (self.__database.name, "system.indexes")
res = helpers._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
ReadPreference.PRIMARY, cmd,
self.database.client._event_listeners)
data = res["data"]
cursor = {
"id": res["cursor_id"],
"firstBatch": data,
"ns": namespace,
}
# Note that a collection can only have 64 indexes, so we don't
# technically have to pass len(data) here. There will never be
# an OP_GET_MORE call.
return CommandCursor(
coll, cursor, sock_info.address, len(data))
def index_information(self):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.ensure_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
"""
cursor = self.list_indexes()
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
"""
with self._socket_for_primary_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version > 2:
criteria = {"name": self.__name}
else:
criteria = {"name": self.__full_name}
cursor = self.__database._list_collections(sock_info,
slave_ok,
criteria)
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
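    # Usage sketch (illustrative; a capped collection created with
    # create_collection('log', capped=True, size=5242880) would yield):
    #
    #     db.log.options()  # -> {u'capped': True, u'size': 5242880}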
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional aggregate parameters should be passed as keyword arguments
to this method. Valid options include, but are not limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `useCursor` (bool): Requests that the `server` provide results
using a cursor, if possible. Ignored if the connected mongod or
mongos does not support returning aggregate results using a cursor.
The default is ``True``. Set this to ``False`` when upgrading a 2.4
or older sharded cluster to 2.6 or newer (see the warning below).
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. warning:: When upgrading a 2.4 or older sharded cluster to 2.6 or
newer the `useCursor` option **must** be set to ``False``
until all shards have been upgraded to 2.6 or newer.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
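        Example (an illustrative sketch; assumes documents with a numeric
        ``x`` field):
        >>> pipeline = [{'$match': {'x': {'$gt': 0}}},
        ...             {'$group': {'_id': None, 'total': {'$sum': '$x'}}}]
        >>> cursor = db.test.aggregate(pipeline, allowDiskUse=True)
        >>> for doc in cursor:
        ...     print(doc)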
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
if not isinstance(pipeline, list):
raise TypeError("pipeline must be a list")
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
batch_size = common.validate_positive_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor", True))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version > 0:
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
if batch_size is not None:
kwargs["cursor"]["batchSize"] = batch_size
dollar_out = pipeline and '$out' in pipeline[-1]
if (sock_info.max_wire_version >= 5 and dollar_out and
self.write_concern):
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
if dollar_out:
result = self._command(sock_info, cmd, slave_ok,
parse_write_concern_error=True,
collation=collation)
else:
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)
else:
result = self._command(sock_info, cmd, slave_ok,
parse_write_concern_error=dollar_out,
collation=collation)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result["result"],
"ns": self.full_name,
}
return CommandCursor(
self, cursor, sock_info.address).batch_size(batch_size or 0)
# key and condition ought to be optional, but deprecation
# would be painful as argument order would have to change.
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
Returns an array of grouped items.
The `key` parameter can be:
- ``None`` to use the entire document as a key.
- A :class:`list` of keys (each a :class:`basestring`
(:class:`str` in python 3)) to group by.
- A :class:`basestring` (:class:`str` in python 3), or
:class:`~bson.code.Code` instance containing a JavaScript
function to be applied to each document, returning the key
to group by.
The :meth:`group` method obeys the :attr:`read_preference` of this
:class:`Collection`.
:Parameters:
- `key`: fields to group by (see above description)
- `condition`: specification of rows to be
considered (as a :meth:`find` query specification)
- `initial`: initial value of the aggregation counter object
- `reduce`: aggregation function as a JavaScript string
- `finalize`: function to be called on each object in output list.
- `**kwargs` (optional): additional arguments to the group command
may be passed as keyword arguments to this helper method
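        Example (an illustrative sketch; assumes documents with an ``a``
        field):
        >>> reduce = "function (obj, prev) { prev.count++; }"
        >>> db.test.group(key=['a'], condition={'a': {'$lt': 3}},
        ...               initial={'count': 0}, reduce=reduce)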
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation)["retval"]
def rename(self, new_name, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
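        Example (an illustrative sketch; the target name is hypothetical):
        >>> db.test.rename('test_archive', dropTarget=True)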
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
        if not new_name or ".." in new_name:
            raise InvalidName("collection names cannot be empty or contain '..'")
        if new_name[0] == "." or new_name[-1] == ".":
            raise InvalidName("collection names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 5 and self.write_concern:
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
sock_info.command('admin', cmd, parse_write_concern_error=True)
def distinct(self, key, filter=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `**kwargs` (optional): See list of options above.
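        Example (an illustrative sketch; assumes documents with ``x`` and
        ``y`` fields):
        >>> db.test.distinct('x', {'y': {'$gt': 0}}, maxTimeMS=500)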
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)["values"]
def map_reduce(self, map, reduce, out, full_response=False, **kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, collections.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in cmd['out']
with self._socket_for_primary_reads() as (sock_info, slave_ok):
if (sock_info.max_wire_version >= 5 and self.write_concern and
not inline):
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
inline):
# No need to parse 'writeConcernError' here, since the command
# is an inline map reduce.
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
read_concern=self.read_concern,
collation=collation)
else:
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
parse_write_concern_error=not inline,
collation=collation)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, **kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.4
Added the `collation` option.
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)
else:
res = self._command(sock_info, cmd, slave_ok,
collation=collation)
if full_response:
return res
else:
return res.get("results")
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation)
_check_write_command_response([(0, out)])
return out.get("value")
def find_one_and_delete(self, filter,
projection=None, sort=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(sock_info, to_save, True,
check_keys, manipulate, write_concern)
else:
self._update(sock_info, {"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._insert(sock_info, doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._update(sock_info, spec, document, upsert,
check_keys, multi, manipulate, write_concern,
collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._delete(sock_info, spec_or_id, multi, write_concern,
collation=collation)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, _ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation)
_check_write_command_response([(0, out)])
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
| gpl-3.0 |
kvar/ansible | lib/ansible/modules/cloud/vmware/_vmware_host_vmhba_facts.py | 21 | 8352 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_vmhba_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vmware_host_vmhba_info) instead.
short_description: Gathers facts about vmhbas available on the given ESXi host
description:
- This module can be used to gather facts about vmhbas available on the given ESXi host.
- If C(cluster_name) is provided, then vmhba facts about all hosts from given cluster will be returned.
- If C(esxi_hostname) is provided, then vmhba facts about given host system will be returned.
version_added: '2.8'
author:
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host system to work with.
- Vmhba facts about this ESXi server will be returned.
- This parameter is required if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- Vmhba facts about each ESXi server will be returned for the given cluster.
- This parameter is required if C(esxi_hostname) is not specified.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather facts about vmhbas of all ESXi Host in the given Cluster
vmware_host_vmhba_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: cluster_host_vmhbas
- name: Gather facts about vmhbas of an ESXi Host
vmware_host_vmhba_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: host_vmhbas
'''
RETURN = r'''
hosts_vmhbas_facts:
description:
- dict with hostname as key and dict with vmhbas facts as value.
returned: hosts_vmhbas_facts
type: dict
sample:
{
"10.76.33.204": {
"vmhba_details": [
{
"adapter": "HPE Smart Array P440ar",
"bus": 3,
"device": "vmhba0",
"driver": "nhpsa",
"location": "0000:03:00.0",
"model": "Smart Array P440ar",
"node_wwn": "50:01:43:80:37:18:9e:a0",
"status": "unknown",
"type": "SAS"
},
{
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"bus": 5,
"device": "vmhba1",
"driver": "qlnativefc",
"location": "0000:05:00.0",
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"node_wwn": "57:64:96:32:15:90:23:95:82",
"port_type": "unknown",
"port_wwn": "57:64:96:32:15:90:23:95:82",
"speed": 8,
"status": "online",
"type": "Fibre Channel"
},
{
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"bus": 8,
"device": "vmhba2",
"driver": "qlnativefc",
"location": "0000:08:00.0",
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"node_wwn": "57:64:96:32:15:90:23:95:21",
"port_type": "unknown",
"port_wwn": "57:64:96:32:15:90:23:95:21",
"speed": 8,
"status": "online",
"type": "Fibre Channel"
}
],
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class HostVmhbaMgr(PyVmomi):
"""Class to manage vmhba facts"""
def __init__(self, module):
super(HostVmhbaMgr, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
if not self.hosts:
self.module.fail_json(msg="Failed to find host system.")
def gather_host_vmhba_facts(self):
"""Gather vmhba facts"""
hosts_vmhba_facts = {}
for host in self.hosts:
host_vmhba_facts = dict()
host_st_system = host.configManager.storageSystem
if host_st_system:
device_info = host_st_system.storageDeviceInfo
host_vmhba_facts['vmhba_details'] = []
for hba in device_info.hostBusAdapter:
hba_facts = dict()
if hba.pci:
hba_facts['location'] = hba.pci
for pci_device in host.hardware.pciDevice:
if pci_device.id == hba.pci:
hba_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
break
else:
hba_facts['location'] = 'PCI'
hba_facts['device'] = hba.device
# contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1'
hba_type = hba.key.split(".")[-1].split("-")[0]
if hba_type == 'SerialAttachedHba':
hba_facts['type'] = 'SAS'
elif hba_type == 'FibreChannelHba':
hba_facts['type'] = 'Fibre Channel'
else:
hba_facts['type'] = hba_type
hba_facts['bus'] = hba.bus
hba_facts['status'] = hba.status
hba_facts['model'] = hba.model
hba_facts['driver'] = hba.driver
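                    # Not every adapter type exposes WWN, port or speed
                    # attributes, so probe each one and skip it when absent.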
try:
hba_facts['node_wwn'] = self.format_number(hba.nodeWorldWideName)
except AttributeError:
pass
try:
hba_facts['port_wwn'] = self.format_number(hba.portWorldWideName)
except AttributeError:
pass
try:
hba_facts['port_type'] = hba.portType
except AttributeError:
pass
try:
hba_facts['speed'] = hba.speed
except AttributeError:
pass
host_vmhba_facts['vmhba_details'].append(hba_facts)
hosts_vmhba_facts[host.name] = host_vmhba_facts
return hosts_vmhba_facts
@staticmethod
def format_number(number):
"""Format number"""
string = str(number)
return ':'.join(a + b for a, b in zip(string[::2], string[1::2]))
def main():
"""Main"""
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
host_vmhba_mgr = HostVmhbaMgr(module)
module.exit_json(changed=False, hosts_vmhbas_facts=host_vmhba_mgr.gather_host_vmhba_facts())
if __name__ == "__main__":
main()
| gpl-3.0 |
ansible/ansible | test/units/config/test_data.py | 113 | 1266 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.config.data import ConfigData
from ansible.config.manager import Setting
mykey = Setting('mykey', 'myvalue', 'test', 'string')
mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list')
mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer')
class TestConfigData(unittest.TestCase):
def setUp(self):
self.cdata = ConfigData()
def tearDown(self):
self.cdata = None
def test_update_setting(self):
for setting in [mykey, mykey2, mykey3]:
self.cdata.update_setting(setting)
self.assertEqual(setting, self.cdata._global_settings.get(setting.name))
def test_update_setting_with_plugin(self):
pass
def test_get_setting(self):
self.cdata._global_settings = {'mykey': mykey}
self.assertEqual(mykey, self.cdata.get_setting('mykey'))
def test_get_settings(self):
all_settings = {'mykey': mykey, 'mykey2': mykey2}
self.cdata._global_settings = all_settings
for setting in self.cdata.get_settings():
self.assertEqual(all_settings[setting.name], setting)
| gpl-3.0 |
legacysurvey/rapala | ninetyprime/linearitycheck.py | 2 | 17953 | #!/usr/bin/env python
import os
import glob
import numpy as np
import fitsio
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from astropy.table import Table
from bokpipe import *
from bokpipe.bokoscan import _convertfitsreg
def init_data_map(datadir,outdir,expTimes=None,files=None):
dataMap = {}
if not os.path.exists(outdir):
os.mkdir(outdir)
dataMap['outdir'] = outdir
if files is None:
dataMap['files'] = sorted(glob.glob(datadir+'*.fits') +
glob.glob(datadir+'*.fits.gz') +
glob.glob(datadir+'*.fits.fz'))
else:
dataMap['files'] = files
dataMap['rawFiles'] = dataMap['files']
dataMap['oscan'] = bokio.FileNameMap(outdir)
dataMap['proc'] = bokio.FileNameMap(outdir,'_p')
dataMap['files'] = [ dataMap['oscan'](f) for f in dataMap['files'] ]
if expTimes is None:
dataMap['expTime'] = np.array([fitsio.read_header(f)['EXPTIME']
for f in dataMap['files']])
else:
dataMap['expTime'] = expTimes
try:
# assume they are all the same
dataMap['dataSec'] = \
_convertfitsreg(fitsio.read_header(
dataMap['files'][0],'IM4')['DATASEC'])
except IOError:
pass
return dataMap
def process_data(dataMap,redo=True,withvar=True,oscanims=False,bias2d=False):
oscanSubtract = BokOverscanSubtract(output_map=dataMap['oscan'],
overwrite=redo,
write_overscan_image=oscanims,
oscan_cols_file=dataMap['outdir']+'oscan_cols',
oscan_rows_file=dataMap['outdir']+'oscan_rows',
verbose=10)#method='median_value')
oscanSubtract.process_files(dataMap['rawFiles'])
if bias2d:
biasname = 'bias'
biasStack = bokproc.BokBiasStack(#reject=None,
overwrite=redo,
with_variance=withvar)
bias2dFile = os.path.join(dataMap['outdir'],biasname+'.fits')
biasStack.stack(dataMap['biasFiles'],bias2dFile)
#imProcess = bokproc.BokCCDProcess(bias2dFile,
# output_map=dataMap['proc'])
#imProcess.process_files(flatFrames)
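# Per-amplifier frame statistics: for every frame in the flat sequence,
# compute the mode, mean, median and the 10/25/75/90 percentile points of
# the pixels in the configured stats region, then save the table as FITS.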
def imstat(dataMap,outfn='stats'):
from astropy.stats import sigma_clip
from scipy.stats import mode,scoreatpercentile
array_stats = bokutil.array_stats
fnlen = len(os.path.basename(dataMap['files'][0]))
st = np.zeros(len(dataMap['flatSequence']),
dtype=[('file','S%d'%fnlen),
('expTime','f4'),
('median','16f4'),
('mean','16f4'),
('mode','16f4'),
('iqr25','16f4'),
('iqr75','16f4'),
('iqr10','16f4'),
('iqr90','16f4')])
for _i,i in enumerate(dataMap['flatSequence']):
expTime = dataMap['expTime'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
print '%s %4.1f ' % (fn,expTime),
st['file'][_i] = fn
st['expTime'][_i] = expTime
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
modeVal,pix = array_stats(fits[extn].read()[dataMap['statsPix']],
method='mode',retArray=True)
st['mode'][_i,j] = modeVal
st['mean'][_i,j] = pix.mean()
st['median'][_i,j] = np.ma.median(pix)
st['iqr25'][_i,j] = scoreatpercentile(pix,25)
st['iqr75'][_i,j] = scoreatpercentile(pix,75)
st['iqr10'][_i,j] = scoreatpercentile(pix,10)
st['iqr90'][_i,j] = scoreatpercentile(pix,90)
print '%5d ' % (modeVal),
print
fitsio.write(outfn+'.fits',st,clobber=True)
def scaled_histograms(dataMap,nims=None,outfn='pixhist'):
pdf = PdfPages(outfn+'.pdf')
for _i,i in enumerate(dataMap['flatSequence']):
if nims is not None and _i==nims:
break
expTime = dataMap['expTime'][i]
expScale = dataMap['refExpTime'] / expTime
print dataMap['files'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
fig = plt.figure(figsize=(8.0,10))
plt.subplots_adjust(0.08,0.08,0.92,0.92,0.3,0.35)
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
ax = plt.subplot(8,2,j+1)
pix = fits[extn].read()[dataMap['statsPix']]
ax.hist(expScale*pix.flatten(),100,(0,40000),edgecolor='none')
ax.text(0.05,0.9,extn,va='top',size=9,transform=ax.transAxes)
ax.set_xlim(0,40000)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10000))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(50000))
plt.figtext(0.5,0.99,fn+' exp=%.1f' % expTime,ha='center',va='top')
pdf.savefig(fig)
plt.close(fig)
pdf.close()
def plot_sequence(dataMap,st,imNum,which='median'):
expScale = dataMap['refExpTime']/st['expTime']
seqno = 1 + np.arange(len(st))
ref = np.isclose(expScale,1.0)
j = imNum - 1
plt.figure(figsize=(8,6))
plt.subplots_adjust(0.11,0.08,0.96,0.95)
plt.errorbar(seqno[ref],expScale[ref]*st[which][ref,j],
[expScale[ref]*(st[which]-st['iqr10'])[ref,j],
expScale[ref]*(st['iqr90']-st[which])[ref,j]],
fmt='bs-')
plt.errorbar(seqno[~ref],expScale[~ref]*st[which][~ref,j],
[expScale[~ref]*(st[which]-st['iqr10'])[~ref,j],
expScale[~ref]*(st['iqr90']-st[which])[~ref,j]],
fmt='cs-')
#plt.scatter(seqno,expScale*st['mode'][:,j],marker='+',c='r')
#plt.scatter(seqno,expScale*st['mean'][:,j],marker='x',c='g')
plt.xlabel('sequence number')
plt.ylabel('counts scaled by exp time')
plt.title('IM%d'%imNum)
plt.xlim(0.5,len(st)+0.5)
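# Lamp-drift correction: the reference exposures repeated through the
# sequence trace the lamp level over time. fit_ref_exposures() returns a
# callable mapping sequence number to a multiplicative factor (from a linear
# or spline fit) that renormalises each frame to the reference count level.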
def fit_ref_exposures(dataMap,st,imNum,
which='median',method='spline',doplot=False):
from scipy.interpolate import UnivariateSpline
seqno = 1 + np.arange(len(st))
t = st['expTime']
ref = np.isclose(t,dataMap['refExpTime'])
j = imNum - 1
refCounts = st[which][ref,j][0]
if method=='linear':
_fit = np.polyfit(seqno[ref],refCounts/st[which][ref,j],1)
fit = lambda x: np.polyval(_fit,x)
elif method=='spline':
fit = UnivariateSpline(seqno[ref],refCounts/st[which][ref,j],
s=1e-5,k=3)
else:
raise ValueError
if doplot:
plt.figure()
plt.subplot(211)
plt.plot(seqno[ref],st[which][ref,j],'bs-')
plt.plot(seqno,refCounts/fit(seqno),c='r')
plt.subplot(212)
plt.plot(seqno[ref],(st[which][ref,j]-refCounts/fit(seqno[ref]))
/st[which][ref,j],'bs-')
plt.axhline(0,c='r')
return fit
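# Linearity curves: counts (optionally corrected for lamp drift) are fit
# linearly against exposure time using only unsaturated frames (<55000
# counts); the companion panel shows the percent residuals from that fit.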
def plot_linearity_curves(dataMap,st,which='median',correct=True,isPTC=False,
refCor=None,fitmethod='spline',outfn='linearity',
onlyim=None):
seqno = 1 + np.arange(len(st))
t = st['expTime']
print seqno,t
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
# only use the increasing sequence, not the reference exposures
ii = ii[~ref]
if isPTC:
# for PTCs skip every other image since they are done in pairs
ii = ii[::2]
# only fit to unsaturated frames
try:
firstsat = np.where(np.any(st[which][ii,:] > 55000,axis=1))[0][0]
except IndexError:
firstsat = -1
if onlyim is None:
pdf = PdfPages(outfn+'.pdf')
for imNum in range(1,17):
if onlyim is not None and imNum != onlyim:
continue
j = imNum - 1
# correct lamp variation
if correct:
if refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[ii[:firstsat]],
fscl[ii[:firstsat]]*st[which][ii[:firstsat],j],1)
fitv = np.polyval(fit,t)
slope = fit[0] / (st[which][ref,j][0]/refExpTime)
#
pltindex = imNum % 4
if onlyim is None:
if pltindex == 1:
fig = plt.figure(figsize=(8,10))
plt.subplots_adjust(0.11,0.08,0.96,0.95,0.25,0.2)
ax = plt.subplot(4,2,2*(j%4)+1)
else:
fig = plt.figure(figsize=(6,2.5))
plt.subplots_adjust(0.11,0.23,0.99,0.98,0.35,0.2)
ax = plt.subplot(1,2,1)
plt.plot(t[ii],fscl[ii]*st[which][ii,j],'bs-')
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
plt.ylim(1e2,9e4)
plt.yscale('log')
plt.ylabel('counts [%s]' % which)
tt = np.logspace(-1,np.log10(1.3*t.max()),100)
plt.plot(tt,np.polyval(fit,tt),c='r')
plt.text(0.05,0.9,'IM%d'%imNum,va='top',transform=ax.transAxes)
plt.text(0.95,0.18,r'y = %.1f $\times$ t + %.1f' % tuple(fit),
ha='right',va='top',size=9,transform=ax.transAxes)
plt.text(0.95,0.10,r'y = %.3f $\times$ counts + %.1f' % (slope,fit[1]),
ha='right',va='top',size=9,transform=ax.transAxes)
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
#
if onlyim is None:
ax = plt.subplot(4,2,2*(j%4)+2)
else:
ax = plt.subplot(1,2,2)
plt.plot(t[ii],100*(fscl[ii]*st[which][ii,j]-fitv[ii])/fitv[ii],'bs-')
plt.axhline(0,c='r')
#ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
#ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(2))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
plt.ylim(-5,5)
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
		plt.ylabel('residual %')
if onlyim is None:
if pltindex == 0:
pdf.savefig(fig)
plt.close(fig)
if onlyim is None:
pdf.close()
def rel_gain(dataMap,st,which='median',correct=True,fitmethod='spline',
nskip=0):
seqno = 1 + np.arange(len(st))
t = st['expTime']
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
ii = ii[~ref]
ii = ii[nskip:]
sky4 = st[which][ii,3]
fit_ii = ii[np.where((sky4>5000)&(sky4<25000))[0]]
plt.figure()
for imNum in range(1,17):
j = imNum - 1
# correct lamp variation
if correct:
if True: #refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[fit_ii],fscl[fit_ii]*st[which][fit_ii,j],1)
fitv = np.polyval(fit,t)
# slope = fit[0] / (st[which][ref,j][0]/refExpTime)
		xx = np.array([0, 1.1*t.max()])
plt.subplot(4,4,imNum)
if False:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j])
plt.plot(xx,np.polyval(fit,xx),c='r')
else:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j]/fitv[ii])
plt.axhline(1,c='r')
plt.ylim(0.7,1.3)
if True:
plt.xscale('log')
plt.xlim(0.9*t.min(),1.1*t.max())
def get_first_saturated_frame(seq):
try:
firstsat = np.where(seq > 55000)[0][0]
except IndexError:
firstsat = -1
return firstsat
def compare_oscan_levels(dataMap,st):
files = [ dataMap['files'][i] for i in dataMap['flatSequence'] ]
oscans = np.zeros((len(files),16))
for j in range(16):
oscans[:,j] = [ fitsio.read_header(f,'IM%d'%(j+1))['OSCANMED']
for f in files ]
seqno = 1 + np.arange(len(st))
plt.figure()
for j in range(8,16):
ax = plt.subplot(8,2,2*(j%8)+1)
i1 = get_first_saturated_frame(st['median'][:,j])
plt.scatter(st['median'][:i1,j],oscans[:i1,j],c='b')
plt.ylabel('IM%d'%(j+1))
ax = plt.subplot(8,2,2*(j%8)+2)
plt.scatter(seqno[:i1],oscans[:i1,j],c='b')
def init_sep09bss_data_map():
datadir = os.environ.get('BASSDATA')+'/20150909/bss/20150908/'
exptimes = np.loadtxt(datadir+'../bss.20150909.log',usecols=(3,))
exptimes = exptimes[50:]
print exptimes
rdxdir = os.environ.get('GSCRATCH','tmp_sep')+'/bss_sep09/'
if not os.path.exists(rdxdir):
os.makedirs(rdxdir)
dataMap = init_data_map(datadir,rdxdir,
expTimes=exptimes,files=None)
dataMap['rawFiles'] = dataMap['rawFiles'][50:]
dataMap['files'] = dataMap['files'][50:]
dataMap['biasFiles'] = dataMap['files'][-5:]
#dataMap['flatSequence'] = range(50,68)
dataMap['flatSequence'] = range(18)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 40.0
return dataMap
def init_sep29ptc_data_map():
dataMap = init_data_map(
"/home/ian/dev/rapala/bokpipe/scratch/sep29ptcs/ptc/",'sep29ptcs/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = np.s_[20:-20,100:-100]
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct02ptc_data_map():
dataMap = init_data_map(os.environ.get('GSCRATCH')+'/02oct15/ptc/',
os.environ.get('GSCRATCH')+'/02oct15/ptc_proc/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct20_data_map():
datadir = os.environ.get('BASSDATA')+'/20151020/'
exptimes = np.loadtxt(datadir+'images.log',usecols=(6,))
nuse = 53
exptimes = exptimes[:nuse]
print exptimes
dataMap = init_data_map(datadir,'tmp_oct20',expTimes=exptimes)
dataMap['rawFiles'] = dataMap['rawFiles'][:nuse]
dataMap['files'] = [ dataMap['oscan'](f)
for f in dataMap['files'][:nuse] ]
dataMap['biasFiles'] = dataMap['files'][:20]
dataMap['flatSequence'] = range(20,nuse)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov11g_data_map():
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
exptimes = log['expTime'][111:150]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[111:150] ]
dataMap = init_data_map(datadir,'tmp_nov11g',
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov14_data_map(filt):
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
if filt=='g':
frames = np.r_[np.s_[297:345],np.s_[247:257]]
else:
frames = np.r_[np.s_[345:393],np.s_[247:257]]
exptimes = log['expTime'][frames]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_nov14'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_jan3_data_map(filt):
datadir = os.environ.get('BASSDATA')
log = Table.read('basslogs/log_ut20160103.fits')
if filt=='g':
frames = np.r_[np.s_[57:105],np.s_[160:170]]
else:
frames = np.r_[np.s_[105:160],np.s_[160:170]]
exptimes = log['expTime'][frames]
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_jan3'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_data_map_fromfile(filename,outdir='tmp',nersc=True):
datadir = os.environ.get('BASSDATA')
if nersc:
datadir = os.path.join(datadir,'BOK_Raw')
log = np.loadtxt(filename,dtype=[('frameNum','i4'),('utDir','S8'),
('fileName','S35'),
('imType','S10'),('filter','S8'),
('expTime','f4')],skiprows=1)
exptimes = log['expTime']
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log ]
if nersc:
files = [ f+'.fz' for f in files ]
dataMap = init_data_map(datadir,outdir,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = np.array(dataMap['files'])[log['imType']=='zero']
dataMap['flatSequence'] = np.where(log['imType']=='flat')[0]
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
# assume it starts with reference
dataMap['refExpTime'] = exptimes[dataMap['flatSequence'][0]]
return dataMap
if __name__=='__main__':
import sys
dataset = sys.argv[1]
if dataset == 'sep09bss':
dataMap = init_sep09bss_data_map()
elif dataset == 'oct02':
dataMap = init_oct02ptc_data_map()
elif dataset == 'oct20':
dataMap = init_oct20_data_map()
elif dataset == 'nov11g':
dataMap = init_nov11g_data_map()
elif dataset == 'nov14g':
dataMap = init_nov14_data_map('g')
elif dataset == 'nov14Ha':
dataMap = init_nov14_data_map('Ha')
elif dataset == 'jan3g':
dataMap = init_jan3_data_map('g')
elif dataset == 'jan3Ha':
dataMap = init_jan3_data_map('Ha')
else:
dataMap = init_data_map_fromfile(sys.argv[2],dataset)
print 'processing ',dataset
if not os.path.exists('stats_'+dataset+'.fits'):
process_data(dataMap,bias2d=True)
imstat(dataMap,outfn='stats_'+dataset)
st = fitsio.read('stats_'+dataset+'.fits')
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset)
if True:
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset,
onlyim=4)
plt.savefig('linearity_IM4_%s.png'%dataset)
plot_sequence(dataMap,st,4)
plt.savefig('linsequence_IM4_%s.png'%dataset)
| bsd-3-clause |
Jailander/COSMOS | kriging_exploration/scripts/explorator.py | 1 | 34183 | #!/usr/bin/env python
import cv2
import sys
import yaml
import signal
import numpy as np
#import utm
import matplotlib as mpl
import matplotlib.cm as cm
import rospy
import argparse
import actionlib
from cosmos_msgs.msg import KrigInfo
from cosmos_msgs.srv import CompareModels
import kriging_exploration.map_coords
import std_msgs.msg
import open_nav.msg
from kriging_exploration.data_grid import DataGrid
from kriging_exploration.map_coords import MapCoords
from kriging_exploration.visualiser import KrigingVisualiser
from kriging_exploration.canvas import ViewerCanvas
from kriging_exploration.topological_map import TopoMap
from kriging_exploration.exploration import ExplorationPlan
from sensor_msgs.msg import NavSatFix
def overlay_image_alpha(img, img_overlay):
"""Overlay img_overlay on top of img at the position specified by
pos and blend using alpha_mask.
"""
show_image = img.copy()
alpha = img_overlay[:, :, 3] / 255.0 # Alpha mask must contain values
# within the range [0, 1]
# and be the same size as img_overlay.
# Image ranges
y1, y2 = 0, img.shape[0]
x1, x2 = 0, img.shape[1]
channels = img.shape[2]
alpha_inv = 1.0 - alpha
for c in range(channels):
show_image[y1:y2, x1:x2, c] = (alpha * img_overlay[y1:y2, x1:x2, c] + alpha_inv * img[y1:y2, x1:x2, c])
return show_image
class Explorator(KrigingVisualiser):
#_w_shape=[(0, 16), (1, 17), (3, 17), (5, 16), (8, 15), (10, 15), (12, 14), (14, 13), (12, 12), (10, 11), (8, 11), (5, 10), (8, 9), (10, 9), (12, 8), (14, 7), (12, 6), (10, 5), (8, 5), (6, 4), (4, 3), (3, 2), (4, 1), (5, 0), (7, 0)]
#_w_shape=[(17, 0), (17, 1), (17, 3), (16, 5), (15, 8), (15, 10), (14, 12), (13, 14), (12, 12), (11, 10), (11, 8), (10, 5), (9, 8), (9, 10), (8, 12), (7, 14), (6, 12), (5, 10), (5, 8), (4, 6), (3, 4), (2, 3), (1, 4), (0, 5), (0, 7)]
#_w_shape=[(17, 0), (17,1), (17, 2), (17, 4), (16, 4), (16, 6), (16, 8), (15, 8), (15, 10), (14, 10), (14, 12), (13, 12), (13, 14), (12, 14), (12, 12), (11, 12), (11, 10), (10, 10), (10, 8), (10, 6), (10, 4), (9, 4), (9, 6), (9, 8), (9, 10), (8, 10), (8, 12), (7, 12), (7, 14), (6, 14), (6, 12), (5, 12), (5, 10), (4, 10), (4, 8), (4, 6), (4, 4), (3, 4), (3, 3), (2, 3), (2, 4), (1,4), (1, 6), (0,6), (1, 8), (0,8), (1, 10), (0, 10), (0, 12), (0, 14)]
_w_shape=[(17, 0), (16, 1), (14, 6), (12, 11), (10, 14), (8, 9), (5, 14), (3, 11), (2, 6), (0, 3)]
def __init__(self, lat_deg, lon_deg, zoom, size, args):
self.targets = []
self.results =[]
self.result_counter=0
self.explodist=0
self.running = True
self.last_coord=None
signal.signal(signal.SIGINT, self.signal_handler)
self.expid=args.experiment_name
print "Creating visualiser object"
super(Explorator, self).__init__(lat_deg, lon_deg, zoom, size)
cv2.namedWindow('explorator')
cv2.setMouseCallback('explorator', self.click_callback)
self.current_model=-1
self.draw_mode = 'none'
self.grid = DataGrid(args.limits_file, args.cell_size)
self.topo_map= TopoMap(self.grid)
self.visited_wp=[]
explo_type = args.area_coverage_type
self.define_exploration_type(explo_type)
self.navigating = False
self.pause_exp = False
self.exploring = 0
self.n_inputs = 0
print "NUMBER OF TARGETS:"
print len(self.explo_plan.targets)
self.limits_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.grid_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.exploration_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.gps_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.limits_canvas.draw_polygon(self.grid.limits, (0,0,255,128), thickness=1)
self.grid_canvas.draw_grid(self.grid.cells, args.cell_size, (128,128,128,2), thickness=1)
self.redraw()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
self.model_canvas=[]
self.model_legend=[]
self.kriging_canvas=[]
self.klegend_canvas=[]
self.klegend2_canvas=[]
self.klegend3_canvas=[]
self.sigma_canvas=[]
self.sigma2_canvas=[]
self.model_canvas_names=[]
self.mean_out_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_out_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
rospy.loginfo("Subscribing to Krig Info")
rospy.Subscriber("/kriging_data", KrigInfo, self.data_callback)
rospy.Subscriber("/fix", NavSatFix, self.gps_callback)
rospy.Subscriber('/penetrometer_scan', std_msgs.msg.String, self.scan_callback)
self.req_data_pub = rospy.Publisher('/request_scan', std_msgs.msg.String, latch=False, queue_size=1)
rospy.loginfo(" ... Connecting to Open_nav")
self.open_nav_client = actionlib.SimpleActionClient('/open_nav', open_nav.msg.OpenNavAction)
self.open_nav_client.wait_for_server()
rospy.loginfo(" ... done")
tim1 = rospy.Timer(rospy.Duration(0.2), self.drawing_timer_callback)
tim2 = rospy.Timer(rospy.Duration(0.1), self.control_timer_callback)
self.refresh()
        while self.running:
cv2.imshow('explorator', self.show_image)
k = cv2.waitKey(20) & 0xFF
self._change_mode(k)
tim1.shutdown()
tim2.shutdown()
cv2.destroyAllWindows()
sys.exit(0)
# EXPLORATION PARAMS HERE!!!!
def define_exploration_type(self, explo_type):
self.exploration_strategy=explo_type
self.n_goals=10
if explo_type=='area_split':
self.grid._split_area(3,3)
sb=[]
for i in self.grid.area_splits_coords:
(y, x) = self.grid.get_cell_inds_from_coords(i)
sb.append((x,y))
            self.explo_plan = ExplorationPlan(self.topo_map, self.args.initial_waypoint, self.args.initial_percent, ac_model=explo_type, ac_coords=sb)
        elif explo_type=='random':
            self.explo_plan = ExplorationPlan(self.topo_map, self.args.initial_waypoint, self.args.initial_percent)
        elif explo_type=='w_shape':
            self.explo_plan = ExplorationPlan(self.topo_map, self.args.initial_waypoint, self.args.initial_percent, ac_model=explo_type, ac_coords=self._w_shape)
        else: #greedy
            self.explo_plan = ExplorationPlan(self.topo_map, self.args.initial_waypoint, self.args.initial_percent, exploration_type='greedy', ac_model=explo_type)
def drawing_timer_callback(self, event):
self.refresh()
def control_timer_callback(self, event):
if self.navigating:
            if self.open_nav_client.simple_state == 2:  # actionlib SimpleGoalState.DONE
                print "DONE NAVIGATING"
self.navigating = False
if self.exploring==1:
self.exploring=2
elif self.exploring==2:
if not self.pause_exp:
self.explo_plan.explored_wp.append(self.explo_plan.route.pop(0))
info_str='Do_reading'
self.req_data_pub.publish(info_str)
self.exploring=3
elif self.exploring==4:
if not self.pause_exp:
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
else:
print "Done Exploring"
self.exploring = 0
# else:
# if self.exploring:
# print "waiting for new goal"
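    # Note (added for clarity, inferred from the handlers above and the key
    # bindings in _change_mode): self.exploring acts as a small state machine:
    #   0 -> idle / exploration finished
    #   1 -> goal sent, navigating to the next waypoint
    #   2 -> waypoint reached, ready to request a sensor reading
    #   3 -> reading requested, waiting for data_callback to krige the models
    #   4 -> models updated, ready to pop the next waypoint from the route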
def gps_callback(self, data):
if not np.isnan(data.latitude):
self.gps_canvas.clear_image()
gps_coord = MapCoords(data.latitude,data.longitude)
self.gps_canvas.draw_coordinate(gps_coord,'black',size=2, thickness=2, alpha=255)
if self.last_coord:
dist = gps_coord - self.last_coord
self.explodist+= dist[0]
self.last_coord=gps_coord
def data_callback(self, msg):
point_coord = kriging_exploration.map_coords.coord_from_satnav_fix(msg.coordinates)
for i in msg.data:
self.grid.add_data_point(i.model_name, point_coord, i.measurement)
self.vmin, self.vmax = self.grid.get_max_min_vals()
self.n_models=len(self.grid.models)
for i in self.grid.models:
if i.name not in self.model_canvas_names:
print i.name
self.model_canvas_names.append(i.name)
self.model_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.model_legend.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.kriging_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend3_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.draw_inputs(self.model_canvas_names.index(i.name))
self.n_inputs+=1
if self.exploring==3:
if self.n_inputs>3:
self.krieg_all_mmodels()
rospy.sleep(0.1)
self.grid.calculate_mean_grid()
rospy.sleep(0.1)
self.draw_means()
self.draw_mode="means"
resp = self.get_errors()
self.result_counter+=1
d={}
d['step']=self.result_counter
d['id']=self.expid
d['ns']=len(self.explo_plan.targets)
d['coord']={}
d['coord']['lat']=self.last_coord.lat
d['coord']['lon']=self.last_coord.lon
d['dist']=float(self.explodist)
d['results']={}
d['results']['groundtruth']=resp
d['results']['var']={}
d['results']['var']['mean']={}
d['results']['var']['mean']['mean']= float(np.mean(self.grid.mean_variance))
d['results']['var']['mean']['max']= float(np.max(self.grid.mean_variance))
d['results']['var']['mean']['min']= float(np.min(self.grid.mean_variance))
# d['results']['var']['std']['mean']= np.mean(self.grid.mean_deviation)
# d['results']['var']['std']['max']= np.max(self.grid.mean_deviation)
# d['results']['var']['std']['min']= np.min(self.grid.mean_deviation)
means=[]
maxs=[]
mins=[]
for i in range(self.n_models):
means.append(float(np.mean(self.grid.models[i].variance)))
maxs.append(float(np.max(self.grid.models[i].variance)))
mins.append(float(np.min(self.grid.models[i].variance)))
d['results']['models']={}
d['results']['models']['means']=means
d['results']['models']['maxs']=maxs
d['results']['models']['mins']=mins
rospy.sleep(0.1)
self.results.append(d)
if self.exploration_strategy == 'greedy':
nwp = len(self.explo_plan.route) + len(self.explo_plan.explored_wp)
print nwp, " nodes in plan"
if nwp <= self.n_goals:
#THIS IS the ONE
#self.explo_plan.add_limited_greedy_goal(self.grid.mean_variance, self.last_coord)
self.explo_plan.add_greedy_goal(self.grid.mean_variance)
#self.explo_plan.add_montecarlo_goal(self.grid.mean_variance, self.last_coord)
#self.draw_mode="deviation"
# self.current_model=0
# if self.redraw_devi:
# self.draw_all_devs()
self.redraw()
rospy.sleep(0.1)
self.exploring=4
def scan_callback(self, msg):
if msg.data == 'Reading':
print "GOT READING!!!"
cx, cy = self.grid.get_cell_inds_from_coords(self.last_coord)
if cx <0 or cy<0:
print "Reading outside the grid"
else:
print 'Reading at: ', cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print 'Setting: ', i.name, i.coord, "as Visited"
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
def refresh(self):
#self.show_image = self.image.copy()
#self.show_image = cv2.addWeighted(self.gps_canvas.image, 0.7, self.image, 1.0, 0)
#self.show_image = transparentOverlay(self.image, self.gps_canvas.image)
self.show_image = overlay_image_alpha(self.image,self.gps_canvas.image)
def redraw(self):
self.image = cv2.addWeighted(self.grid_canvas.image, 0.5, self.base_image, 1.0, 0)
self.image = cv2.addWeighted(self.limits_canvas.image, 0.75, self.image, 1.0, 0)
self.image = cv2.addWeighted(self.exploration_canvas.image, 0.75, self.image, 1.0, 0)
if self.draw_mode == "inputs" and self.current_model>=0 :
self.image = cv2.addWeighted(self.model_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.model_legend[self.current_model].image)
if self.draw_mode == "kriging":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.kriging_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend_canvas[self.current_model].image)
if self.draw_mode == "deviation":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend3_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend3_canvas[self.current_model].image)
if self.draw_mode == "variance":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma2_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend2_canvas[self.current_model].image)
if self.draw_mode == "means":
self.image = cv2.addWeighted(self.mean_dev_canvas.image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.mean_dev_legend_canvas.image)
self.show_image = self.image.copy()
def click_callback(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print i.name, i.coord.easting, i.coord.northing
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
if event == cv2.EVENT_LBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
#goal.goal.goal.header.
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=i.coord.lat
targ.goal.coords.longitude=i.coord.lon
print targ
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
#self.client.wait_for_result()
# Prints out the result of executing the action
#ps = self.client.get_result()
#print ps
def draw_inputs(self, nm):
minv = self.grid.models[nm].lims[0]
maxv = self.grid.models[nm].lims[1]
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.model_canvas[nm].clear_image()
self.model_legend[nm].clear_image()
for i in self.grid.models[nm].orig_data:
cell = self.grid.cells[i.y][i.x]
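            # colmap.to_rgba() yields RGBA floats in [0, 1]; OpenCV expects
            # 8-bit BGR(A), so the channels are swapped and scaled below
            # (alpha is scaled by 50, i.e. ~20% opacity, for translucency)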
a= colmap.to_rgba(int(i.value))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.model_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.model_canvas[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].draw_legend(minv, maxv, colmap, title="Kriging")
def draw_krigged(self, nm):
print "drawing kriging" + str(nm)
minv = self.grid.models[nm].min_val
maxv = self.grid.models[nm].max_val
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.kriging_canvas[nm].clear_image()
self.klegend_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].output[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.kriging_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend_canvas[nm].draw_legend(minv, maxv, colmap, title="Kriging")
self.redraw()
def draw_variance(self, nm):
print "drawing variance" + str(nm)
minv = self.grid.models[nm].min_var
maxv = self.grid.models[nm].max_var
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax= maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend2_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].variance[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma2_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend2_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend2_canvas[nm].draw_legend(minv, maxv, colmap, title="Variance")
self.redraw()
def draw_means(self):
print "drawing mean deviation ..."
minv = self.grid.min_mean_deviation
maxv = self.grid.max_mean_deviation
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.mean_dev_canvas.clear_image()
self.mean_dev_legend_canvas.clear_image()
for i in range(self.grid.shape[0]):
for j in range(self.grid.shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.mean_deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.mean_dev_canvas.draw_cell(cell, self.grid.cell_size, b, thickness=-1)
#self.mean_dev_legend_canvas.put_text(self.grid.models[nm].name)
self.mean_dev_legend_canvas.draw_legend(minv, maxv, colmap, title="Mean Deviation")
#self.draw_mode="means"
self.redraw()
def draw_deviation(self, nm):
print "drawing deviation" + str(nm)
minv = self.grid.models[nm].min_dev
maxv = self.grid.models[nm].max_dev
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend3_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend3_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend3_canvas[nm].draw_legend(minv, maxv, colmap, title="Deviation")
self.redraw()
def krieg_all_mmodels(self):
for i in self.grid.models:
i.do_krigging()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
def draw_all_outputs(self):
for i in self.grid.models:
self.draw_krigged(self.model_canvas_names.index(i.name))
self.redraw_kriged=False
def draw_all_vars(self):
for i in self.grid.models:
self.draw_variance(self.model_canvas_names.index(i.name))
self.redraw_var=False
def draw_all_devs(self):
for i in self.grid.models:
self.draw_deviation(self.model_canvas_names.index(i.name))
self.redraw_devi=False
def _change_mode(self, k):
if k == 27:
self.running = False
elif k == ord('q'):
self.running = False
elif k == ord('n'):
print len(self.grid.models)
elif k == ord('i'):
if self.n_models > 0:
self.draw_mode="inputs"
self.current_model=0
self.redraw()
elif k == ord('d'):
if self.n_models > 0:
self.draw_mode="deviation"
self.current_model=0
if self.redraw_devi:
self.draw_all_devs()
self.redraw()
elif k == ord('v'):
if self.n_models > 0:
self.draw_mode="variance"
self.current_model=0
if self.redraw_var:
self.draw_all_vars()
self.redraw()
elif k == ord('t'):
self.krieg_all_mmodels()
self.grid.calculate_mean_grid()
if self.n_models > 0:
self.draw_all_outputs()
self.draw_mode="kriging"
self.current_model=0
self.redraw()
elif k == ord('k'):
if self.n_models > 0:
self.draw_mode="kriging"
self.current_model=0
if self.redraw_kriged:
self.draw_all_outputs()
self.redraw()
elif k == ord('>'):
self.current_model+=1
if self.current_model >= self.n_models:
self.current_model=0
self.redraw()
elif k == ord('<'):
self.current_model-=1
if self.current_model < 0:
self.current_model=self.n_models-1
self.redraw()
elif k == ord('w'):
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
elif k == ord('e'):
self.exploration_canvas.draw_waypoints(self.explo_plan.targets, (255,200,128,255), thickness=3)
self.exploration_canvas.draw_plan(self.explo_plan.route, 'cyan', thickness=1)
self.redraw()
#xnames = [x.name for x in self.explo_plan.route]
#print xnames
elif k == ord('g'):
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
self.result_counter=0
self.explodist=0
else:
print "Done Exploring"
self.exploring = 0
elif k == ord('y'):
vwp = []
for i in self.visited_wp:
vwp.append(i.name)
yml = yaml.safe_dump(vwp, default_flow_style=False)
fh = open("visited.yaml", "w")
s_output = str(yml)
fh.write(s_output)
            fh.close()
elif k == ord('l'):
print "loading visited"
with open("visited.yaml", 'r') as f:
                visited = yaml.safe_load(f)
for i in visited:
for l in self.topo_map.waypoints:
if i == l.name:
self.visited_wp.append(l)
break
elif k == ord('a'):
self.grid.calculate_mean_grid()
self.draw_means()
self.draw_mode="means"
elif k == ord('p'):
self.pause_exp= not self.pause_exp
elif k == ord('c'):
print self.grid.limits
print "Area: ", self.grid.calculate_area(self.grid.limits)
print "Area of Area: ", self.grid.area.area_size
colours=['magenta','cyan', 'grey','white','red','yellow','green','blue']
nc=0
for j in self.grid.area_splits:
print j.area_size
#self.limits_canvas.draw_coordinate(j.centre, 'crimson', size=3, thickness=2)
for i in j.limit_lines:
#self.limits_canvas.draw_line(i, colours[nc], thickness=1)
self.limits_canvas.draw_line(i, 'white', thickness=1)
if nc < len(colours)-1:
nc+=1
else:
nc=0
self.redraw()
elif k== ord('r'):
#diff = (self.grid.models[1].output - self.grid.models[0].output)
#print np.mean(diff), np.std(diff), diff.dtype
print self.get_errors()
elif k== ord('o'):
print self.results
outfile = self.expid + '.yaml'
#print self.data_out
yml = yaml.safe_dump(self.results, default_flow_style=False)
fh = open(outfile, "w")
s_output = str(yml)
#print s_output
fh.write(s_output)
            fh.close()
def get_errors(self):
error_chain=[]
shapeo = self.grid.models[0].output.shape
#print vals
print "Waiting for Service"
rospy.wait_for_service('/compare_model')
compare_serv = rospy.ServiceProxy('/compare_model', CompareModels)
for i in range(self.n_models):
try:
d={}
print "going for it ", i
vals = np.reshape(self.grid.models[i].output, -1)
resp1 = compare_serv('kriging', i, shapeo[0], shapeo[1], vals.tolist())
d['name']= self.grid.models[i].name
d['type']= 'kriging'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
d={}
print "Mean "
vals = np.reshape(self.grid.mean_output, -1)
resp1 = compare_serv('mean', 0, shapeo[0], shapeo[1], vals.tolist())
#print self.grid.mean_output
d['name']= 'mean'
d['type']= 'mean'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return error_chain
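    # Note (added for clarity): get_errors() returns a list of dicts, one per
    # kriged model plus one for the mean model, e.g.:
    #   {'name': 'model_name', 'type': 'kriging',
    #    'errors': {'error': ..., 'mse': ..., 'std': ..., 'var': ...}}
    # where the error fields come from the /compare_model service response.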
def signal_handler(self, signal, frame):
self.running = False
print('You pressed Ctrl+C!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cell_size", type=int, default=10,
help="cell size in meters")
parser.add_argument("--initial_percent", type=float, default=0.05,
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--limits_file", type=str, default='limits.coords',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--initial_waypoint", type=str, default='WayPoint498',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--area_coverage_type", type=str, default='area_split',
help="Type of area coverage, random or area_split")
parser.add_argument("--experiment_name", type=str, default='exp1',
help="Experiment ID")
args = parser.parse_args()
rospy.init_node('kriging_exploration')
#Explorator(53.261685, -0.527158, 16, 640, args.cell_size)
#Explorator(53.267213, -0.533420, 17, 640, args) #Football Field
Explorator(53.261576, -0.526648, 17, 640, args) #Half cosmos field
#Explorator(53.261685, -0.525158, 17, 640, args) #COSMOS Field
| mit |
ntt-sic/cinder | cinder/api/contrib/hosts.py | 3 | 10182 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo.config import cfg
import webob.exc
from xml.parsers import expat
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('service-status')
elem.set('service')
elem.set('zone')
elem.set('service-state')
elem.set('host_name')
elem.set('last-update')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
for child in node.childNodes[0].childNodes:
updates[child.tagName] = self.extract_text(child)
return dict(body=updates)
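# Example (added for illustration, not part of the original module): the
# deserializer above flattens the children of the root element, so a PUT
# body of '<updates><status>enable</status></updates>' is parsed into
# {'body': {'status': 'enable'}}, matching the dict the JSON path produces.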
def _list_hosts(req, service=None):
"""Returns a summary list of hosts."""
curr_time = timeutils.utcnow()
context = req.environ['cinder.context']
services = db.service_get_all(context, False)
zone = ''
if 'zone' in req.GET:
zone = req.GET['zone']
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for host in services:
delta = curr_time - (host['updated_at'] or host['created_at'])
alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
status = (alive and "available") or "unavailable"
active = 'enabled'
if host['disabled']:
active = 'disabled'
LOG.debug('status, active and update: %s, %s, %s' %
(status, active, host['updated_at']))
hosts.append({'host_name': host['host'],
'service': host['topic'],
'zone': host['availability_zone'],
'service-status': status,
'service-state': active,
'last-update': host['updated_at']})
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
message = _("Host '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=message)
return wrapped
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = volume_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['cinder.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
for key, value in update_values.iteritems():
result.update(update_setters[key](req, id, value))
return result
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s."),
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the volume usage info given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'volume_count': 1, 'total_volume_gb': 2048}
"""
host = id
context = req.environ['cinder.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
try:
host_ref = db.service_get_by_host_and_topic(context,
host,
CONF.volume_topic)
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
# Getting total available/used resource
# TODO(jdg): Add summary info for Snapshots
volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
(count, sum) = db.volume_data_get_for_host(context,
host_ref['host'])
snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count_total),
'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, sum) = db.volume_data_get_for_project(context, project_id)
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
context,
project_id)
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration"""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={
'update': 'PUT'},
member_actions={
'startup': 'GET',
'shutdown': 'GET',
'reboot': 'GET'})]
return resources
| apache-2.0 |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/vyborymos.py | 73 | 2031 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
class VyboryMosIE(InfoExtractor):
_VALID_URL = r'https?://vybory\.mos\.ru/(?:#precinct/|account/channels\?.*?\bstation_id=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://vybory.mos.ru/#precinct/13636',
'info_dict': {
'id': '13636',
'ext': 'mp4',
'title': 're:^Участковая избирательная комиссия №2231 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Россия, Москва, улица Введенского, 32А',
'is_live': True,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://vybory.mos.ru/account/channels?station_id=13636',
'only_matching': True,
}]
def _real_extract(self, url):
station_id = self._match_id(url)
channels = self._download_json(
'http://vybory.mos.ru/account/channels?station_id=%s' % station_id,
station_id, 'Downloading channels JSON')
formats = []
for cam_num, (sid, hosts, name, _) in enumerate(channels, 1):
for num, host in enumerate(hosts, 1):
formats.append({
'url': 'http://%s/master.m3u8?sid=%s' % (host, sid),
'ext': 'mp4',
'format_id': 'camera%d-host%d' % (cam_num, num),
'format_note': '%s, %s' % (name, host),
})
info = self._download_json(
'http://vybory.mos.ru/json/voting_stations/%s/%s.json'
% (compat_str(station_id)[:3], station_id),
station_id, 'Downloading station JSON', fatal=False)
return {
'id': station_id,
'title': self._live_title(info['name'] if info else station_id),
'description': info.get('address'),
'is_live': True,
'formats': formats,
}
| gpl-2.0 |
mekanix/geonode | geonode/people/tests.py | 19 | 2719 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.sites.models import Site
class PeopleTest(TestCase):
fixtures = ('people_data.json', 'bobby.json')
def test_forgot_username(self):
url = reverse('forgot_username')
# page renders
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# and responds for a bad email
response = self.client.post(url, data={
'email': '[email protected]'
})
# self.assertContains(response, "No user could be found with that email address.")
admin = get_user_model().objects.get(username='bobby')
response = self.client.post(url, data={
'email': admin.email
})
# and sends a mail for a good one
self.assertEqual(len(mail.outbox), 1)
site = Site.objects.get_current()
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, "Your username for " + site.name)
def test_account_email_sync(self):
        '''Verify we can create an account and modify it, keeping emails in sync.'''
from geonode.people.models import Profile
email = '[email protected]'
joebob = Profile.objects.create(username='joebob', email=email)
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = '[email protected]'
joebob.email = email
joebob.save()
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = joebob.emailaddress_set.get(primary=True)
email.email = '[email protected]'
email.save()
joebob = Profile.objects.get(id=joebob.id)
self.assertEqual(email.email, joebob.email)
| gpl-3.0 |
jkonecny12/anaconda | pyanaconda/modules/network/nm_client.py | 1 | 63957 | #
# utility functions using libnm
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("NM", "1.0")
from gi.repository import NM
import socket
from queue import Queue, Empty
from pykickstart.constants import BIND_TO_MAC
from pyanaconda.modules.network.constants import NM_CONNECTION_UUID_LENGTH, \
CONNECTION_ACTIVATION_TIMEOUT, NM_CONNECTION_TYPE_WIFI, NM_CONNECTION_TYPE_ETHERNET, \
NM_CONNECTION_TYPE_VLAN, NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM, \
NM_CONNECTION_TYPE_BRIDGE, NM_CONNECTION_TYPE_INFINIBAND, CONNECTION_ADDING_TIMEOUT
from pyanaconda.modules.network.kickstart import default_ks_vlan_interface_name
from pyanaconda.modules.network.utils import is_s390, get_s390_settings, netmask2prefix, \
prefix2netmask
from pyanaconda.modules.network.config_file import is_config_file_for_system
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
NM_BRIDGE_DUMPED_SETTINGS_DEFAULTS = {
NM.SETTING_BRIDGE_MAC_ADDRESS: None,
NM.SETTING_BRIDGE_STP: True,
NM.SETTING_BRIDGE_PRIORITY: 32768,
NM.SETTING_BRIDGE_FORWARD_DELAY: 15,
NM.SETTING_BRIDGE_HELLO_TIME: 2,
NM.SETTING_BRIDGE_MAX_AGE: 20,
NM.SETTING_BRIDGE_AGEING_TIME: 300,
NM.SETTING_BRIDGE_GROUP_FORWARD_MASK: 0,
NM.SETTING_BRIDGE_MULTICAST_SNOOPING: True
}
def get_iface_from_connection(nm_client, uuid):
"""Get the name of device that would be used for the connection.
    In the installer it should be just one device.
    We also need to account for configurations bound to a MAC address
    (HWADDR), e.g. via the network --bindto=mac command.
"""
connection = nm_client.get_connection_by_uuid(uuid)
if not connection:
return None
iface = connection.get_setting_connection().get_interface_name()
if not iface:
wired_setting = connection.get_setting_wired()
if wired_setting:
mac = wired_setting.get_mac_address()
if mac:
iface = get_iface_from_hwaddr(nm_client, mac)
return iface
def get_vlan_interface_name_from_connection(nm_client, connection):
"""Get vlan interface name from vlan connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
If no interface name is specified in the connection settings, infer the
value as <PARENT_IFACE>.<VLAN_ID> - same as NetworkManager.
"""
iface = connection.get_setting_connection().get_interface_name()
if not iface:
setting_vlan = connection.get_setting_vlan()
if setting_vlan:
vlanid = setting_vlan.get_id()
parent = setting_vlan.get_parent()
# if parent is specified by UUID
if len(parent) == NM_CONNECTION_UUID_LENGTH:
parent = get_iface_from_connection(nm_client, parent)
if vlanid is not None and parent:
iface = default_ks_vlan_interface_name(parent, vlanid)
return iface
def get_iface_from_hwaddr(nm_client, hwaddr):
"""Find the name of device specified by mac address."""
for device in nm_client.get_devices():
if device.get_device_type() in (NM.DeviceType.ETHERNET,
NM.DeviceType.WIFI):
try:
address = device.get_permanent_hw_address()
if not address:
address = device.get_hw_address()
except AttributeError as e:
log.warning("Device %s: %s", device.get_iface(), e)
address = device.get_hw_address()
else:
address = device.get_hw_address()
# per #1703152, at least in *some* case, we wind up with
# address as None here, so we need to guard against that
if address and address.upper() == hwaddr.upper():
return device.get_iface()
return None
def get_team_port_config_from_connection(nm_client, uuid):
connection = nm_client.get_connection_by_uuid(uuid)
if not connection:
return None
team_port = connection.get_setting_team_port()
if not team_port:
return None
config = team_port.get_config()
return config
def get_device_name_from_network_data(nm_client, network_data, supported_devices, bootif):
"""Get the device name from kickstart device specification.
Generally given by --device option. For vlans also --interfacename
and --vlanid comes into play.
    Side effect: for vlans, sets network_data.parent from the --device option.
    :param network_data: a kickstart device configuration
    :type network_data: kickstart NetworkData object
    :param supported_devices: list of names of supported devices
    :type supported_devices: list(str)
    :param bootif: MAC address of the device to be used for the --device=bootif specification
    :type bootif: str
:returns: device name the configuration should be used for
:rtype: str
"""
spec = network_data.device
device_name = ""
msg = ""
if not spec:
msg = "device specification missing"
# Specification by device name
if spec in supported_devices:
device_name = spec
msg = "existing device found"
# Specification by mac address
elif ':' in spec:
device_name = get_iface_from_hwaddr(nm_client, spec) or ""
msg = "existing device found"
# Specification by BOOTIF boot option
elif spec == 'bootif':
if bootif:
device_name = get_iface_from_hwaddr(nm_client, bootif) or ""
msg = "existing device for {} found".format(bootif)
else:
msg = "BOOTIF value is not specified in boot options"
# First device with carrier (sorted lexicographically)
elif spec == 'link':
device_name = get_first_iface_with_link(nm_client, supported_devices) or ""
msg = "first device with link found"
if device_name:
if device_name not in supported_devices:
msg = "{} device found is not supported".format(device_name)
device_name = ""
# Virtual devices don't have to exist
elif spec and any((network_data.vlanid,
network_data.bondslaves,
network_data.teamslaves,
network_data.bridgeslaves)):
device_name = spec
msg = "virtual device does not exist, which is OK"
if network_data.vlanid:
network_data.parent = device_name
if network_data.interfacename:
device_name = network_data.interfacename
msg = "vlan device name specified by --interfacename"
else:
device_name = default_ks_vlan_interface_name(device_name, network_data.vlanid)
msg = "vlan device name inferred from parent and vlanid"
log.debug("kickstart specification --device=%s -> %s (%s)", spec, device_name, msg)
return device_name
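# Examples of the resolution above (added for illustration; device names and
# MAC addresses are hypothetical):
#   --device=ens3                  -> "ens3" if it is a supported device
#   --device=52:54:00:12:34:56     -> the iface owning that MAC address
#   --device=bootif                -> the iface matching the BOOTIF= boot option
#   --device=link                  -> first supported iface with carrier
#   --device=mydev --vlanid=171    -> "mydev.171" unless --interfacename is set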
def _create_vlan_bond_connection_from_ksdata(network_data):
con = _create_new_connection(network_data, network_data.device)
_update_bond_connection_from_ksdata(con, network_data)
# No ip configuration on vlan parent (bond)
s_ip4 = NM.SettingIP4Config.new()
s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD,
NM.SETTING_IP4_CONFIG_METHOD_DISABLED)
con.add_setting(s_ip4)
s_ip6 = NM.SettingIP6Config.new()
s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD,
NM.SETTING_IP6_CONFIG_METHOD_DISABLED)
con.add_setting(s_ip6)
return con
def _update_bond_connection_from_ksdata(connection, network_data):
"""Update connection with values from bond kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_BOND
s_bond = NM.SettingBond.new()
opts = network_data.bondopts
if opts:
for option in opts.split(';' if ';' in opts else ','):
key, _sep, value = option.partition("=")
if s_bond.validate_option(key, value):
s_bond.add_option(key, value)
else:
log.warning("ignoring invalid bond option '%s=%s'", key, value)
connection.add_setting(s_bond)
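# Example (added for illustration): bondopts such as
# "mode=active-backup,primary=ens3" become individual bond options; a ';'
# separator is honored instead of ',' so that option values which themselves
# contain commas (e.g. "arp_ip_target=10.0.0.1,10.0.0.2") can be expressed.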
def _add_existing_virtual_device_to_bridge(nm_client, device_name, bridge_spec):
"""Add existing virtual device to a bridge.
:param device_name: name of the virtual device to be added
:type device_name: str
:param bridge_spec: specification of the bridge (interface name or connection uuid)
:type bridge_spec: str
:returns: uuid of the updated connection or None
:rtype: str
"""
supported_virtual_types = (
NM_CONNECTION_TYPE_BOND,
)
port_connection = None
cons = nm_client.get_connections()
for con in cons:
if con.get_interface_name() == device_name \
and con.get_connection_type() in supported_virtual_types:
port_connection = con
break
if not port_connection:
return None
update_connection_values(
port_connection,
[
(NM.SETTING_CONNECTION_SETTING_NAME,
NM.SETTING_CONNECTION_SLAVE_TYPE,
'bridge'),
(NM.SETTING_CONNECTION_SETTING_NAME,
NM.SETTING_CONNECTION_MASTER,
bridge_spec),
]
)
commit_changes_with_autoconnection_blocked(port_connection)
return port_connection.get_uuid()
def _update_team_connection_from_ksdata(connection, network_data):
"""Update connection with values from team kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = "team"
s_team = NM.SettingTeam.new()
s_team.props.config = network_data.teamconfig
connection.add_setting(s_team)
def _update_vlan_connection_from_ksdata(connection, network_data):
"""Update connection with values from vlan kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:returns: interface name of the device
:rtype: str
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_VLAN
if network_data.interfacename:
s_con.props.id = network_data.interfacename
s_con.props.interface_name = network_data.interfacename
else:
s_con.props.interface_name = None
s_vlan = NM.SettingVlan.new()
s_vlan.props.id = int(network_data.vlanid)
s_vlan.props.parent = network_data.parent
connection.add_setting(s_vlan)
return s_con.props.interface_name
def _update_bridge_connection_from_ksdata(connection, network_data):
"""Update connection with values from bridge kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_BRIDGE
s_bridge = NM.SettingBridge.new()
for opt in network_data.bridgeopts.split(","):
key, _sep, value = opt.partition("=")
if key in ("stp", "multicast-snooping"):
if value == "yes":
value = True
elif value == "no":
value = False
else:
try:
value = int(value)
except ValueError:
log.error("Invalid bridge option %s", opt)
continue
s_bridge.set_property(key, value)
connection.add_setting(s_bridge)
def _update_infiniband_connection_from_ksdata(connection, network_data):
"""Update connection with values from infiniband kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_INFINIBAND
s_ib = NM.SettingInfiniband.new()
s_ib.props.transport_mode = "datagram"
connection.add_setting(s_ib)
def _update_ethernet_connection_from_ksdata(connection, network_data, bound_mac):
"""Update connection with values from ethernet kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param bound_mac: MAC address the device name is bound to (ifname=)
:type bound_mac: str
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_ETHERNET
s_wired = NM.SettingWired.new()
if bound_mac:
s_wired.props.mac_address = bound_mac
connection.add_setting(s_wired)
def _update_wired_connection_with_s390_settings(connection, s390cfg):
"""Update connection with values specific for s390 architecture.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param s390cfg: dictionary storing s390 specific settings
:type s390cfg: dict
"""
s_wired = connection.get_setting_wired()
if s390cfg['SUBCHANNELS']:
subchannels = s390cfg['SUBCHANNELS'].split(",")
s_wired.props.s390_subchannels = subchannels
if s390cfg['NETTYPE']:
s_wired.props.s390_nettype = s390cfg['NETTYPE']
if s390cfg['OPTIONS']:
opts = s390cfg['OPTIONS'].split(" ")
opts_dict = {k: v for k, v in (o.split("=") for o in opts)}
s_wired.props.s390_options = opts_dict
def _create_new_connection(network_data, device_name):
con_uuid = NM.utils_uuid_generate()
con = NM.SimpleConnection.new()
s_con = NM.SettingConnection.new()
s_con.props.uuid = con_uuid
s_con.props.id = device_name
s_con.props.interface_name = device_name
s_con.props.autoconnect = network_data.onboot
con.add_setting(s_con)
return con
def create_connections_from_ksdata(nm_client, network_data, device_name, ifname_option_values=None):
"""Create NM connections from kickstart configuration.
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param device_name: name of the device to be configured by kickstart
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
:return: list of tuples (CONNECTION, NAME_OF_DEVICE_TO_BE_ACTIVATED)
:rtype: list((NM.RemoteConnection, str))
"""
ifname_option_values = ifname_option_values or []
port_connections = []
connections = []
device_to_activate = device_name
con = _create_new_connection(network_data, device_name)
bond_con = None
update_connection_ip_settings_from_ksdata(con, network_data)
# type "bond"
if network_data.bondslaves:
# vlan over bond
if network_data.vlanid:
# create bond connection, vlan connection will be created later
bond_controller = network_data.device
bond_con = _create_vlan_bond_connection_from_ksdata(network_data)
connections.append((bond_con, bond_controller))
else:
bond_controller = device_name
_update_bond_connection_from_ksdata(con, network_data)
for i, port in enumerate(network_data.bondslaves.split(","), 1):
port_con = create_port_connection('bond', i, port, bond_controller,
network_data.onboot)
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "team"
if network_data.teamslaves:
_update_team_connection_from_ksdata(con, network_data)
for i, (port, cfg) in enumerate(network_data.teamslaves, 1):
s_team_port = NM.SettingTeamPort.new()
s_team_port.props.config = cfg
port_con = create_port_connection('team', i, port, device_name,
network_data.onboot, settings=[s_team_port])
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "vlan"
if network_data.vlanid:
device_to_activate = _update_vlan_connection_from_ksdata(con, network_data) \
or device_to_activate
# type "bridge"
if network_data.bridgeslaves:
# bridge connection is autoactivated
_update_bridge_connection_from_ksdata(con, network_data)
for i, port in enumerate(network_data.bridgeslaves.split(","), 1):
if not _add_existing_virtual_device_to_bridge(nm_client, port, device_name):
port_con = create_port_connection('bridge', i, port, device_name,
network_data.onboot)
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "infiniband"
if is_infiniband_device(nm_client, device_name):
_update_infiniband_connection_from_ksdata(con, network_data)
# type "802-3-ethernet"
if is_ethernet_device(nm_client, device_name):
bound_mac = bound_hwaddr_of_device(nm_client, device_name, ifname_option_values)
_update_ethernet_connection_from_ksdata(con, network_data, bound_mac)
if bound_mac:
log.debug("add connection: mac %s is bound to name %s",
bound_mac, device_name)
else:
bind_connection(nm_client, con, network_data.bindto, device_name)
# Add s390 settings
if is_s390():
s390cfg = get_s390_settings(device_name)
_update_wired_connection_with_s390_settings(con, s390cfg)
connections.append((con, device_to_activate))
connections.extend(port_connections)
return connections
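# Example (added for illustration, names hypothetical): for
# "network --device=bond0 --bondslaves=ens3,ens4" the function above returns
# [(bond_connection, "bond0"), (port1_connection, "ens3"),
#  (port2_connection, "ens4")]; the controller connection comes first,
# followed by one port connection per bond member.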
def add_connection_from_ksdata(nm_client, network_data, device_name, activate=False,
ifname_option_values=None):
"""Add NM connection created from kickstart configuration.
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param device_name: name of the device to be configured by kickstart
:type device_name: str
:param activate: activate the added connection
:type activate: bool
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
"""
connections = create_connections_from_ksdata(
nm_client,
network_data,
device_name,
ifname_option_values
)
for connection, device_name in connections:
log.debug("add connection (activate=%s): %s for %s\n%s",
activate, connection.get_uuid(), device_name,
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
added_connection = add_connection_sync(
nm_client,
connection,
)
if not added_connection:
continue
if activate:
if device_name:
device = nm_client.get_device_by_iface(device_name)
if device:
log.debug("activating with device %s", device.get_iface())
else:
log.debug("activating without device specified - device %s not found",
device_name)
else:
device = None
log.debug("activating without device specified")
nm_client.activate_connection_async(added_connection, device, None, None)
return connections
def add_connection_sync(nm_client, connection):
"""Add a connection synchronously and optionally activate asynchronously.
:param connection: connection to be added
:type connection: NM.SimpleConnection
:return: added connection or None on timeout
:rtype: NM.RemoteConnection
"""
sync_queue = Queue()
def finish_callback(nm_client, result, sync_queue):
con, result = nm_client.add_connection2_finish(result)
log.debug("connection %s added:\n%s", con.get_uuid(),
con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
sync_queue.put(con)
nm_client.add_connection2(
connection.to_dbus(NM.ConnectionSerializationFlags.ALL),
(NM.SettingsAddConnection2Flags.TO_DISK |
NM.SettingsAddConnection2Flags.BLOCK_AUTOCONNECT),
None,
False,
None,
finish_callback,
sync_queue
)
try:
ret = sync_queue.get(timeout=CONNECTION_ADDING_TIMEOUT)
except Empty:
log.error("Adding of connection %s timed out.", connection.get_uuid())
ret = None
return ret
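# Illustrative sketch (added; not part of the original module): the
# Queue-based synchronization pattern used by add_connection_sync() above,
# generalized. 'start_async' stands for any hypothetical function that takes
# a one-argument completion callback and invokes it from the GLib main loop
# with the operation's result; on timeout None is returned, mirroring the
# behavior above.
def _example_wait_sync(start_async, timeout):
    sync_queue = Queue()
    # hand the queue's put() to the async API as its completion callback
    start_async(sync_queue.put)
    try:
        return sync_queue.get(timeout=timeout)
    except Empty:
        return None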
def create_port_connection(port_type, port_idx, port, controller, autoconnect, settings=None):
"""Create a port NM connection for virtual connection (bond, team, bridge).
:param port_type: type of port ("bond", "team", "bridge")
:type port_type: str
:param port_idx: index of the port for naming
:type port_idx: int
:param port: port's device name
:type port: str
:param controller: port's controller device name
:type controller: str
:param autoconnect: connection autoconnect value
:type autoconnect: bool
:param settings: list of other settings to be added to the connection
:type settings: list(NM.Setting)
:return: created connection
:rtype: NM.SimpleConnection
"""
settings = settings or []
port_name = "%s_slave_%d" % (controller, port_idx)
con = NM.SimpleConnection.new()
s_con = NM.SettingConnection.new()
s_con.props.uuid = NM.utils_uuid_generate()
s_con.props.id = port_name
s_con.props.slave_type = port_type
s_con.props.master = controller
s_con.props.type = NM_CONNECTION_TYPE_ETHERNET
s_con.props.autoconnect = autoconnect
con.add_setting(s_con)
s_wired = NM.SettingWired.new()
con.add_setting(s_wired)
for setting in settings:
con.add_setting(setting)
return con
def is_infiniband_device(nm_client, device_name):
"""Is the type of the device infiniband?"""
device = nm_client.get_device_by_iface(device_name)
if device and device.get_device_type() == NM.DeviceType.INFINIBAND:
return True
return False
def is_ethernet_device(nm_client, device_name):
"""Is the type of the device ethernet?"""
device = nm_client.get_device_by_iface(device_name)
if device and device.get_device_type() == NM.DeviceType.ETHERNET:
return True
return False
def is_ibft_connection(connection):
"""Is the connection generated by NM from iBFT?"""
return connection.get_id().startswith("iBFT Connection")
def bound_hwaddr_of_device(nm_client, device_name, ifname_option_values):
"""Check and return mac address of device bound by device renaming.
For example ifname=ens3:f4:ce:46:2c:44:7a should bind the device name ens3
    to the MAC address (and eventually rename the device in initramfs). If
    the hardware address of the device named device_name matches the MAC
    address, that MAC value is returned.
:param device_name: device name
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
:return: hwaddress of the device if bound, or None
:rtype: str or None
"""
for ifname_value in ifname_option_values:
iface, mac = ifname_value.split(":", 1)
if iface == device_name:
if iface == get_iface_from_hwaddr(nm_client, mac):
return mac.upper()
else:
log.warning("MAC address of ifname %s does not correspond to ifname=%s",
iface, ifname_value)
return None
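# Example (added for illustration): with the boot option
# "ifname=ens3:f4:ce:46:2c:44:7a" and a device whose permanent MAC is
# f4:ce:46:2c:44:7a, bound_hwaddr_of_device(nm_client, "ens3",
# ["ens3:f4:ce:46:2c:44:7a"]) returns "F4:CE:46:2C:44:7A" (upper-cased);
# for any other device name it returns None.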
def update_connection_from_ksdata(nm_client, connection, network_data, device_name,
ifname_option_values=None):
"""Update NM connection specified by uuid from kickstart configuration.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
:param network_data: kickstart network configuration
:type network_data: pykickstart NetworkData
:param device_name: device name the connection should be bound to eventually
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
"""
log.debug("updating connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
ifname_option_values = ifname_option_values or []
# IP configuration
update_connection_ip_settings_from_ksdata(connection, network_data)
s_con = connection.get_setting_connection()
s_con.set_property(NM.SETTING_CONNECTION_AUTOCONNECT, network_data.onboot)
if connection.get_connection_type() not in (NM_CONNECTION_TYPE_BOND,
NM_CONNECTION_TYPE_TEAM,
NM_CONNECTION_TYPE_VLAN,
NM_CONNECTION_TYPE_BRIDGE):
bound_mac = bound_hwaddr_of_device(nm_client, device_name, ifname_option_values)
if bound_mac:
log.debug("update connection: mac %s is bound to name %s", bound_mac, device_name)
# The connection is already bound to iface name by NM in initramfs,
# still bind also to MAC until this method of renaming is abandoned (rhbz#1875485)
bind_connection(nm_client, connection, BIND_TO_MAC, device_name,
bind_exclusively=False)
else:
bind_connection(nm_client, connection, network_data.bindto, device_name)
commit_changes_with_autoconnection_blocked(connection)
log.debug("updated connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
def update_connection_ip_settings_from_ksdata(connection, network_data):
"""Update NM connection from kickstart IP configuration in place.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
    :param network_data: kickstart configuration containing the IP configuration
to be applied to the connection
:type network_data: pykickstart NetworkData
"""
# ipv4 settings
if network_data.noipv4:
method4 = "disabled"
elif network_data.bootProto == "static":
method4 = "manual"
else:
method4 = "auto"
connection.remove_setting(NM.SettingIP4Config)
s_ip4 = NM.SettingIP4Config.new()
s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, method4)
if method4 == "manual":
prefix4 = netmask2prefix(network_data.netmask)
addr4 = NM.IPAddress.new(socket.AF_INET, network_data.ip, prefix4)
s_ip4.add_address(addr4)
if network_data.gateway:
s_ip4.props.gateway = network_data.gateway
if network_data.nodefroute:
s_ip4.props.never_default = True
connection.add_setting(s_ip4)
# ipv6 settings
if network_data.noipv6:
method6 = "ignore"
elif not network_data.ipv6 or network_data.ipv6 == "auto":
method6 = "auto"
elif network_data.ipv6 == "dhcp":
method6 = "dhcp"
else:
method6 = "manual"
connection.remove_setting(NM.SettingIP6Config)
s_ip6 = NM.SettingIP6Config.new()
s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, method6)
s_ip6.set_property(NM.SETTING_IP6_CONFIG_ADDR_GEN_MODE,
NM.SettingIP6ConfigAddrGenMode.EUI64)
if method6 == "manual":
addr6, _slash, prefix6 = network_data.ipv6.partition("/")
if prefix6:
prefix6 = int(prefix6)
else:
prefix6 = 64
addr6 = NM.IPAddress.new(socket.AF_INET6, addr6, prefix6)
s_ip6.add_address(addr6)
if network_data.ipv6gateway:
s_ip6.props.gateway = network_data.ipv6gateway
connection.add_setting(s_ip6)
# nameservers
if network_data.nameserver:
for ns in [str.strip(i) for i in network_data.nameserver.split(",")]:
if NM.utils_ipaddr_valid(socket.AF_INET6, ns):
s_ip6.add_dns(ns)
elif NM.utils_ipaddr_valid(socket.AF_INET, ns):
s_ip4.add_dns(ns)
else:
log.error("IP address %s is not valid", ns)
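# Illustrative mapping implemented above (hypothetical kickstart line):
# "network --bootproto=static --ip=10.0.0.2 --netmask=255.255.255.0
# --gateway=10.0.0.1 --nameserver=10.0.0.53" yields method4 == "manual", one
# NM.IPAddress 10.0.0.2/24, gateway "10.0.0.1" and a single IPv4 DNS entry.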
def bind_settings_to_mac(nm_client, s_connection, s_wired, device_name=None, bind_exclusively=True):
"""Bind the settings to the mac address of the device.
:param s_connection: connection setting to be updated
:type s_connection: NM.SettingConnection
:param s_wired: wired setting to be updated
:type s_wired: NM.SettingWired
:param device_name: name of the device to be bound
:type device_name: str
:param bind_exclusively: remove reference to the device name from the settings
:type bind_exclusively: bool
:returns: True if the settings were modified, False otherwise
:rtype: bool
"""
mac_address = s_wired.get_mac_address()
interface_name = s_connection.get_interface_name()
modified = False
if mac_address:
log.debug("Bind to mac: already bound to %s", mac_address)
else:
iface = device_name or interface_name
if not iface:
log.warning("Bind to mac: no device name provided to look for mac")
return False
device = nm_client.get_device_by_iface(iface)
if device:
try:
perm_hwaddr = device.get_permanent_hw_address()
except AttributeError:
perm_hwaddr = None
hwaddr = perm_hwaddr or device.get_hw_address()
s_wired.props.mac_address = hwaddr
log.debug("Bind to mac: bound to %s", hwaddr)
modified = True
if bind_exclusively and interface_name:
s_connection.props.interface_name = None
log.debug("Bind to mac: removed interface-name %s from connection", interface_name)
modified = True
return modified
def bind_settings_to_device(nm_client, s_connection, s_wired, device_name=None,
bind_exclusively=True):
"""Bind the settings to the name of the device.
:param s_connection: connection setting to be updated
:type s_connection: NM.SettingConnection
:param s_wired: wired setting to be updated
:type s_wired: NM.SettingWired
:param device_name: name of the device to be bound
:type device_name: str
:param bind_exclusively: remove reference to the mac address from the settings
:type bind_exclusively: bool
:returns: True if the settings were modified, False otherwise
:rtype: bool
"""
mac_address = s_wired.get_mac_address()
interface_name = s_connection.get_interface_name()
modified = False
if device_name:
s_connection.props.interface_name = device_name
log.debug("Bind to device: %s -> %s", interface_name, device_name)
modified = interface_name != device_name
else:
if not interface_name:
log.debug("Bind to device: no device to bind to")
return False
else:
log.debug("Bind to device: already bound to %s", interface_name)
if bind_exclusively and mac_address:
s_wired.props.mac_address = None
log.debug("Bind to device: removed mac-address from connection")
modified = True
return modified
def bind_connection(nm_client, connection, bindto, device_name=None, bind_exclusively=True):
"""Bind the connection to device name or mac address.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
    :param bindto: type of binding of the connection (mac address or device name)
- BIND_TO_MAC for mac address
- None for device name (default)
:type bindto: pykickstart --bindto constant
:param device_name: device name for binding
:type device_name: str
:param bind_exclusively: when binding to an entity, remove reference to the other
:type bind_exclusively: bool
:returns: True if the connection was modified, False otherwise
:rtype: bool
"""
msg = "Bind connection {} to {}:".format(connection.get_uuid(), bindto or "iface")
s_con = connection.get_setting_connection()
if not s_con:
log.warning("%s no connection settings, bailing", msg)
return False
s_wired = connection.get_setting_wired()
if bindto == BIND_TO_MAC:
if not s_wired:
log.warning("%s no wired settings, bailing", msg)
return False
modified = bind_settings_to_mac(nm_client, s_con, s_wired, device_name, bind_exclusively)
else:
modified = bind_settings_to_device(nm_client, s_con, s_wired, device_name, bind_exclusively)
return modified
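# Usage sketch (hypothetical connection object `con`):
#
#   bind_connection(nm_client, con, BIND_TO_MAC, "ens3")  # bind to MAC address
#   bind_connection(nm_client, con, None, "ens3")         # bind to device name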
def get_connections_available_for_iface(nm_client, iface):
"""Get all connections available for given interface.
:param iface: interface name
:type iface: str
:return: list of all available connections
:rtype: list(NM.RemoteConnection)
"""
cons = []
device = nm_client.get_device_by_iface(iface)
if device:
cons = device.get_available_connections()
else:
# Try also non-existing (not real) virtual devices
for device in nm_client.get_all_devices():
if not device.is_real() and device.get_iface() == iface:
cons = device.get_available_connections()
if cons:
break
else:
# Getting available connections does not seem to work quite well for
# non-real team - try to look them up in all connections.
for con in nm_client.get_connections():
interface_name = con.get_interface_name()
if not interface_name and con.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
interface_name = get_vlan_interface_name_from_connection(nm_client, con)
if interface_name == iface:
cons.append(con)
return cons
def update_connection_values(connection, new_values):
"""Update setting values of a connection.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
:param new_values: list of properties to be updated
:type new_values: [(SETTING_NAME, SETTING_PROPERTY, VALUE)]
"""
for setting_name, setting_property, value in new_values:
setting = connection.get_setting_by_name(setting_name)
if setting:
setting.set_property(setting_property, value)
log.debug("updating connection %s setting '%s' '%s' to '%s'",
connection.get_uuid(), setting_name, setting_property, value)
else:
log.debug("setting '%s' not found while updating connection %s",
setting_name, connection.get_uuid())
log.debug("updated connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.ALL))
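# Usage sketch showing the expected shape of new_values (hypothetical values):
#
#   update_connection_values(connection, [
#       ("connection", NM.SETTING_CONNECTION_AUTOCONNECT, False),
#       ("802-3-ethernet", NM.SETTING_WIRED_MTU, 9000),
#   ])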
def devices_ignore_ipv6(nm_client, device_types):
"""All connections of devices of given type ignore ipv6."""
device_types = device_types or []
for device in nm_client.get_devices():
if device.get_device_type() in device_types:
cons = device.get_available_connections()
for con in cons:
s_ipv6 = con.get_setting_ip6_config()
if s_ipv6 and s_ipv6.get_method() != NM.SETTING_IP6_CONFIG_METHOD_IGNORE:
return False
return True
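# Usage sketch (assumes the NM GObject introspection binding is loaded):
#
#   devices_ignore_ipv6(nm_client, [NM.DeviceType.ETHERNET])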
def get_first_iface_with_link(nm_client, ifaces):
"""Find first iface having link (in lexicographical order)."""
for iface in sorted(ifaces):
device = nm_client.get_device_by_iface(iface)
if device and device.get_carrier():
return device.get_iface()
return None
def get_connections_dump(nm_client):
"""Dumps all connections for logging."""
con_dumps = []
for con in nm_client.get_connections():
con_dumps.append(str(con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS)))
return "\n".join(con_dumps)
def commit_changes_with_autoconnection_blocked(connection, save_to_disk=True):
"""Implementation of NM CommitChanges() method with blocked autoconnection.
Update2() API is used to implement the functionality (called synchronously).
Prevents autoactivation of the connection on its update which would happen
with CommitChanges if "autoconnect" is set True.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param save_to_disk: should the changes be written also to disk?
:type save_to_disk: bool
    :return: on success, the result of the Update2() call; None on failure
:rtype: GVariant of type "a{sv}" or None
"""
sync_queue = Queue()
def finish_callback(connection, result, sync_queue):
ret = connection.update2_finish(result)
sync_queue.put(ret)
flags = NM.SettingsUpdate2Flags.BLOCK_AUTOCONNECT
if save_to_disk:
flags |= NM.SettingsUpdate2Flags.TO_DISK
con2 = NM.SimpleConnection.new_clone(connection)
connection.update2(
con2.to_dbus(NM.ConnectionSerializationFlags.ALL),
flags,
None,
None,
finish_callback,
sync_queue
)
return sync_queue.get()
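# Design note: update2() is asynchronous; the Queue turns it into a blocking
# call by waiting for finish_callback, which fires on the GLib main loop
# (assumed to be iterating in another thread, as in the installer).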
def clone_connection_sync(nm_client, connection, con_id=None, uuid=None):
"""Clone a connection synchronously.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param con_id: id of the cloned connection
:type con_id: str
:param uuid: uuid of the cloned connection (None to be generated)
:type uuid: str
:return: NetworkManager connection or None on timeout
:rtype: NM.RemoteConnection
"""
sync_queue = Queue()
def finish_callback(nm_client, result, sync_queue):
con, result = nm_client.add_connection2_finish(result)
log.debug("connection %s cloned:\n%s", con.get_uuid(),
con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
sync_queue.put(con)
cloned_connection = NM.SimpleConnection.new_clone(connection)
s_con = cloned_connection.get_setting_connection()
s_con.props.uuid = uuid or NM.utils_uuid_generate()
s_con.props.id = con_id or "{}-clone".format(connection.get_id())
nm_client.add_connection2(
cloned_connection.to_dbus(NM.ConnectionSerializationFlags.ALL),
(NM.SettingsAddConnection2Flags.TO_DISK |
NM.SettingsAddConnection2Flags.BLOCK_AUTOCONNECT),
None,
False,
None,
finish_callback,
sync_queue
)
try:
ret = sync_queue.get(timeout=CONNECTION_ACTIVATION_TIMEOUT)
except Empty:
log.error("Cloning of a connection timed out.")
ret = None
return ret
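# Usage sketch (hypothetical id; a uuid is generated when not supplied):
#
#   clone = clone_connection_sync(nm_client, connection, con_id="ens3-clone")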
def get_dracut_arguments_from_connection(nm_client, connection, iface, target_ip,
hostname, ibft=False):
"""Get dracut arguments for the iface and SAN target from NM connection.
Examples of SAN: iSCSI, FCoE
The dracut arguments would activate the iface in initramfs so that the
SAN target can be attached (usually to mount root filesystem).
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface used to connect to the target
        (can be None if ibft is used)
:type iface: str
:param target_ip: IP of the SAN target
:type target_ip: str
:param hostname: static hostname to be configured
:type hostname: str
:param ibft: network should be configured from ibft
:type ibft: bool
:returns: dracut arguments
:rtype: set(str)
"""
netargs = set()
if ibft:
netargs.add("rd.iscsi.ibft")
elif target_ip:
if hostname is None:
hostname = ""
if ':' in target_ip:
# Using IPv6 target IP
ipv6_arg = _get_dracut_ipv6_argument(connection, iface, hostname)
if ipv6_arg:
netargs.add(ipv6_arg)
else:
log.error("No IPv6 configuration found in connection %s", connection.get_uuid())
else:
# Using IPv4 target IP
ipv4_arg = _get_dracut_ipv4_argument(connection, iface, hostname)
if ipv4_arg:
netargs.add(ipv4_arg)
else:
log.error("No IPv4 configuration found in connection %s", connection.get_uuid())
ifname_arg = _get_dracut_ifname_argument_from_connection(connection, iface)
if ifname_arg:
netargs.add(ifname_arg)
team_arg = _get_dracut_team_argument_from_connection(nm_client, connection, iface)
if team_arg:
netargs.add(team_arg)
vlan_arg, vlan_parent_connection = _get_dracut_vlan_argument_from_connection(nm_client,
connection,
iface)
if vlan_arg:
netargs.add(vlan_arg)
# For vlan the parent connection defines the s390 znet argument values
if vlan_parent_connection:
connection = vlan_parent_connection
znet_arg = _get_dracut_znet_argument_from_connection(connection)
if znet_arg:
netargs.add(znet_arg)
return netargs
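# Illustrative result for a hypothetical static IPv4 connection on ens3 that
# is bound to a MAC address, targeting an IPv4 SAN:
#
#   {"ip=10.0.0.2::10.0.0.1:255.255.255.0:myhost:ens3:none",
#    "ifname=ens3:52:54:00:12:34:56"}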
def _get_dracut_ipv6_argument(connection, iface, hostname):
"""Get dracut ip IPv6 configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:param hostname: static hostname to be configured
:type hostname: str
    :returns: dracut ip argument or "" if the configuration can't be found
    :rtype: str
"""
argument = ""
ip6_config = connection.get_setting_ip6_config()
ip6_method = ip6_config.get_method()
if ip6_method == NM.SETTING_IP6_CONFIG_METHOD_AUTO:
argument = "ip={}:auto6".format(iface)
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DHCP:
# Most probably not working
argument = "ip={}:dhcp6".format(iface)
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_MANUAL:
ipaddr = ""
if ip6_config.get_num_addresses() > 0:
addr = ip6_config.get_address(0)
ipaddr = "[{}/{}]".format(addr.get_address(), addr.get_prefix())
gateway = ip6_config.get_gateway() or ""
if gateway:
gateway = "[{}]".format(gateway)
if ipaddr or gateway:
argument = ("ip={}::{}::{}:{}:none".format(ipaddr, gateway, hostname, iface))
return argument
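# Examples of the produced argument (hypothetical addresses):
#   auto:   "ip=ens3:auto6"
#   manual: "ip=[fd00::2/64]::[fd00::1]::myhost:ens3:none"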
def _get_dracut_ipv4_argument(connection, iface, hostname):
"""Get dracut ip IPv4 configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:param hostname: static hostname to be configured
:type hostname: str
    :returns: dracut ip argument or "" if the configuration can't be found
:rtype: str
"""
argument = ""
ip4_config = connection.get_setting_ip4_config()
ip4_method = ip4_config.get_method()
if ip4_method == NM.SETTING_IP4_CONFIG_METHOD_AUTO:
argument = "ip={}:dhcp".format(iface)
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_MANUAL:
if ip4_config.get_num_addresses() > 0:
addr = ip4_config.get_address(0)
ip = addr.get_address()
netmask = prefix2netmask(addr.get_prefix())
gateway = ip4_config.get_gateway() or ""
argument = "ip={}::{}:{}:{}:{}:none".format(ip, gateway, netmask, hostname, iface)
return argument
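# Examples of the produced argument (hypothetical addresses):
#   auto (dhcp): "ip=ens3:dhcp"
#   manual:      "ip=10.0.0.2::10.0.0.1:255.255.255.0:myhost:ens3:none"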
def _get_dracut_ifname_argument_from_connection(connection, iface):
"""Get dracut ifname configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: dracut ifname argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
wired_setting = connection.get_setting_wired()
if wired_setting:
hwaddr = wired_setting.get_mac_address()
if hwaddr:
argument = "ifname={}:{}".format(iface, hwaddr.lower())
return argument
def _get_dracut_team_argument_from_connection(nm_client, connection, iface):
"""Get dracut team configuration for given interface and NM connection.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: dracut team argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
if connection.get_connection_type() == NM_CONNECTION_TYPE_TEAM:
ports = get_ports_from_connections(
nm_client,
["team"],
[iface, connection.get_uuid()]
)
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
argument = "team={}:{}".format(iface, ",".join(port_ifaces))
return argument
def _get_dracut_vlan_argument_from_connection(nm_client, connection, iface):
"""Get dracut vlan configuration for given interface and NM connection.
Returns also parent vlan connection.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: tuple (ARGUMENT, PARENT_CONNECTION) where
ARGUMENT is dracut vlan argument or "" if the configuration does not apply
PARENT_CONNECTION is vlan parent connection of the connection
:rtype: tuple(str, NM.RemoteConnection)
"""
argument = ""
parent_con = None
if connection.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
setting_vlan = connection.get_setting_vlan()
parent_spec = setting_vlan.get_parent()
parent = None
# parent can be specified by connection uuid (eg from nm-c-e)
if len(parent_spec) == NM_CONNECTION_UUID_LENGTH:
parent_con = nm_client.get_connection_by_uuid(parent_spec)
if parent_con:
# On s390 with net.ifnames=0 there is no DEVICE so use NAME
parent = parent_con.get_interface_name() or parent_con.get_id()
# parent can be specified by interface
else:
parent = parent_spec
parent_cons = get_connections_available_for_iface(nm_client, parent)
if len(parent_cons) != 1:
log.error("unexpected number of connections found for vlan parent %s",
parent_spec)
if parent_cons:
parent_con = parent_cons[0]
if parent:
argument = "vlan={}:{}".format(iface, parent)
else:
log.error("can't find parent interface of vlan device %s specified by %s",
iface, parent_spec)
if not parent_con:
log.error("can't find parent connection of vlan device %s specified by %s",
iface, parent_spec)
return argument, parent_con
def _get_dracut_znet_argument_from_connection(connection):
"""Get dracut znet (s390) configuration for given NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:returns: dracut znet argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
wired_setting = connection.get_setting_wired()
if wired_setting and is_s390():
nettype = wired_setting.get_s390_nettype()
subchannels = wired_setting.get_s390_subchannels()
if nettype and subchannels:
argument = "rd.znet={},{}".format(nettype, subchannels)
options = wired_setting.get_property(NM.SETTING_WIRED_S390_OPTIONS)
if options:
options_string = ','.join("{}={}".format(key, val) for key, val in options.items())
argument += ",{}".format(options_string)
return argument
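# Example of the produced argument (hypothetical s390 qeth device):
#   "rd.znet=qeth,0.0.0900,0.0.0901,0.0.0902,layer2=1"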
def get_ports_from_connections(nm_client, port_types, controller_specs):
"""Get ports of controller of given type specified by uuid or interface.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
    :param port_types: types of the port - NM setting "slave-type" values (eg. "team")
:type port_types: list(str)
    :param controller_specs: a list containing the specification of a controller:
interface name or connection uuid or both
:type controller_specs: list(str)
:returns: ports specified by name, interface and connection uuid
:rtype: set((str,str,str))
"""
ports = set()
for con in nm_client.get_connections():
        if con.get_setting_connection().get_slave_type() not in port_types:
continue
if con.get_setting_connection().get_master() in controller_specs:
iface = get_iface_from_connection(nm_client, con.get_uuid())
name = con.get_id()
ports.add((name, iface, con.get_uuid()))
return ports
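# Usage sketch (hypothetical team controller "team0"); returns a set of
# (name, iface, uuid) triples describing the controller's ports:
#
#   get_ports_from_connections(nm_client, ["team"], ["team0"])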
def get_config_file_connection_of_device(nm_client, device_name, device_hwaddr=None):
"""Find connection of the device's configuration file.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param device_name: name of the device
:type device_name: str
:param device_hwaddr: hardware address of the device
:type device_hwaddr: str
:returns: uuid of NetworkManager connection
:rtype: str
"""
cons = []
for con in nm_client.get_connections():
filename = con.get_filename() or ""
# Ignore connections from initramfs in
# /run/NetworkManager/system-connections
if not is_config_file_for_system(filename):
continue
con_type = con.get_connection_type()
if con_type == NM_CONNECTION_TYPE_ETHERNET:
# Ignore ports
if con.get_setting_connection().get_master():
continue
interface_name = con.get_interface_name()
mac_address = None
wired_setting = con.get_setting_wired()
if wired_setting:
mac_address = wired_setting.get_mac_address()
if interface_name:
if interface_name == device_name:
cons.append(con)
elif mac_address:
if device_hwaddr:
if device_hwaddr.upper() == mac_address.upper():
cons.append(con)
else:
iface = get_iface_from_hwaddr(nm_client, mac_address)
if iface == device_name:
cons.append(con)
elif is_s390():
# s390 setting generated in dracut with net.ifnames=0
# has neither DEVICE/interface-name nor HWADDR/mac-address set (#1249750)
if con.get_id() == device_name:
cons.append(con)
elif con_type in (NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM,
NM_CONNECTION_TYPE_BRIDGE, NM_CONNECTION_TYPE_INFINIBAND):
if con.get_interface_name() == device_name:
cons.append(con)
elif con_type == NM_CONNECTION_TYPE_VLAN:
interface_name = get_vlan_interface_name_from_connection(nm_client, con)
if interface_name and interface_name == device_name:
cons.append(con)
if len(cons) > 1:
log.debug("Unexpected number of config files found for %s: %s", device_name,
[con.get_filename() for con in cons])
if cons:
return cons[0].get_uuid()
else:
log.debug("Config file for %s not found", device_name)
return ""
def get_kickstart_network_data(connection, nm_client, network_data_class):
"""Get kickstart data from NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param network_data_class: pykickstart network command data class
:type: pykickstart BaseData
:returns: network_data object corresponding to the connection
:rtype: network_data_class object instance
"""
# no network command for non-virtual device ports
if connection.get_connection_type() not in (NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM):
if connection.get_setting_connection().get_master():
return None
# no support for wireless
if connection.get_connection_type() == NM_CONNECTION_TYPE_WIFI:
return None
network_data = network_data_class()
# connection
network_data.onboot = connection.get_setting_connection().get_autoconnect()
iface = get_iface_from_connection(nm_client, connection.get_uuid())
if iface:
network_data.device = iface
_update_ip4_config_kickstart_network_data(connection, network_data)
_update_ip6_config_kickstart_network_data(connection, network_data)
_update_nameserver_kickstart_network_data(connection, network_data)
# --mtu
s_wired = connection.get_setting_wired()
if s_wired:
if s_wired.get_mtu():
network_data.mtu = s_wired.get_mtu()
# vlan
if connection.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
_update_vlan_kickstart_network_data(nm_client, connection, network_data)
# bonding
if connection.get_connection_type() == NM_CONNECTION_TYPE_BOND:
_update_bond_kickstart_network_data(nm_client, iface, connection, network_data)
# bridging
if connection.get_connection_type() == NM_CONNECTION_TYPE_BRIDGE:
_update_bridge_kickstart_network_data(nm_client, iface, connection, network_data)
# teaming
if connection.get_connection_type() == NM_CONNECTION_TYPE_TEAM:
_update_team_kickstart_network_data(nm_client, iface, connection, network_data)
return network_data
def _update_nameserver_kickstart_network_data(connection, network_data):
"""Update nameserver configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
# --nameserver is used both for ipv4 and ipv6
dns_list = []
s_ip4_config = connection.get_setting_ip4_config()
if s_ip4_config:
for i in range(s_ip4_config.get_num_dns()):
dns_list.append(s_ip4_config.get_dns(i))
s_ip6_config = connection.get_setting_ip6_config()
if s_ip6_config:
for i in range(s_ip6_config.get_num_dns()):
dns_list.append(s_ip6_config.get_dns(i))
dns_str = ','.join(dns_list)
if dns_str:
network_data.nameserver = dns_str
def _update_ip4_config_kickstart_network_data(connection, network_data):
"""Update IPv4 configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
s_ip4_config = connection.get_setting_ip4_config()
if not s_ip4_config:
return
ip4_method = s_ip4_config.get_method()
if ip4_method == NM.SETTING_IP4_CONFIG_METHOD_DISABLED:
network_data.noipv4 = True
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_AUTO:
network_data.bootProto = "dhcp"
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_MANUAL:
network_data.bootProto = "static"
if s_ip4_config.get_num_addresses() > 0:
addr = s_ip4_config.get_address(0)
network_data.ip = addr.get_address()
netmask = prefix2netmask(addr.get_prefix())
if netmask:
network_data.netmask = netmask
gateway = s_ip4_config.get_gateway()
if gateway:
network_data.gateway = gateway
# --hostname
ip4_dhcp_hostname = s_ip4_config.get_dhcp_hostname()
if ip4_dhcp_hostname:
network_data.hostname = ip4_dhcp_hostname
def _update_ip6_config_kickstart_network_data(connection, network_data):
"""Update IPv6 configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
s_ip6_config = connection.get_setting_ip6_config()
if not s_ip6_config:
return
ip6_method = s_ip6_config.get_method()
if ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DISABLED:
network_data.noipv6 = True
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_AUTO:
network_data.ipv6 = "auto"
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DHCP:
network_data.ipv6 = "dhcp"
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_MANUAL:
if s_ip6_config.get_num_addresses() > 0:
addr = s_ip6_config.get_address(0)
network_data.ipv6 = "{}/{}".format(addr.get_address(), addr.get_prefix())
gateway = s_ip6_config.get_gateway()
if gateway:
network_data.ipv6gateway = gateway
def _update_vlan_kickstart_network_data(nm_client, connection, network_data):
"""Update vlan configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
setting_vlan = connection.get_setting_vlan()
if setting_vlan:
interface_name = connection.get_setting_connection().get_interface_name()
vlanid = setting_vlan.get_id()
parent = setting_vlan.get_parent()
# if parent is specified by UUID
if len(parent) == NM_CONNECTION_UUID_LENGTH:
parent = get_iface_from_connection(nm_client, parent)
default_name = default_ks_vlan_interface_name(parent, vlanid)
if interface_name and interface_name != default_name:
network_data.interfacename = interface_name
network_data.vlanid = vlanid
network_data.device = parent
def _update_bond_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update bond configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['bond'],
[iface, connection.get_uuid()]
)
if ports:
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
network_data.bondslaves = ",".join(port_ifaces)
s_bond = connection.get_setting_bond()
if s_bond:
option_list = []
for i in range(s_bond.get_num_options()):
_result, _name, _value = s_bond.get_option(i)
if _result:
option_list.append("{}={}".format(_name, _value))
if option_list:
network_data.bondopts = ",".join(option_list)
def _update_bridge_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update bridge configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['bridge'],
[iface, connection.get_uuid()]
)
if ports:
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
network_data.bridgeslaves = ",".join(port_ifaces)
s_bridge = connection.get_setting_bridge()
if s_bridge:
bridge_options = []
for setting, default_value in NM_BRIDGE_DUMPED_SETTINGS_DEFAULTS.items():
value = s_bridge.get_property(setting)
if value != default_value:
bridge_options.append("{}={}".format(setting, value))
if bridge_options:
network_data.bridgeopts = ",".join(bridge_options)
def _update_team_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update team configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['team'],
[iface, connection.get_uuid()]
)
if ports:
port_list = sorted((s_iface, s_uuid) for _name, s_iface, s_uuid in ports if s_iface)
for s_iface, s_uuid in port_list:
team_port_cfg = get_team_port_config_from_connection(nm_client, s_uuid) or ""
network_data.teamslaves.append((s_iface, team_port_cfg))
s_team = connection.get_setting_team()
if s_team:
teamconfig = s_team.get_config()
if teamconfig:
network_data.teamconfig = teamconfig.replace("\n", "").replace(" ", "")
| gpl-2.0 |
Glorf/servo | tests/wpt/web-platform-tests/tools/py/testing/path/test_local.py | 160 | 29652 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import py
import pytest
import os, sys
from py.path import local
import common
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
"and getattr(os, '_name', None) == 'nt'")
win32only = py.test.mark.skipif(
"not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
skiponwin32 = py.test.mark.skipif(
"sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
def pytest_funcarg__path1(request):
def setup():
path1 = request.getfuncargvalue("tmpdir")
common.setuptestfs(path1)
return path1
def teardown(path1):
# post check
assert path1.join("samplefile").check()
return request.cached_setup(setup, teardown, scope="session")
class TestLocalPath(common.CommonFSTests):
def test_join_normpath(self, tmpdir):
assert tmpdir.join(".") == tmpdir
p = tmpdir.join("../%s" % tmpdir.basename)
assert p == tmpdir
p = tmpdir.join("..//%s/" % tmpdir.basename)
assert p == tmpdir
@skiponwin32
def test_dirpath_abs_no_abs(self, tmpdir):
p = tmpdir.join('foo')
assert p.dirpath('/bar') == tmpdir.join('bar')
assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
def test_gethash(self, tmpdir):
md5 = py.builtin._tryimport('md5', 'hashlib').md5
lib = py.builtin._tryimport('sha', 'hashlib')
sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
fn = tmpdir.join("testhashfile")
data = 'hello'.encode('ascii')
fn.write(data, mode="wb")
assert fn.computehash("md5") == md5(data).hexdigest()
assert fn.computehash("sha1") == sha(data).hexdigest()
py.test.raises(ValueError, fn.computehash, "asdasd")
def test_remove_removes_readonly_file(self, tmpdir):
readonly_file = tmpdir.join('readonly').ensure()
readonly_file.chmod(0)
readonly_file.remove()
assert not readonly_file.check(exists=1)
def test_remove_removes_readonly_dir(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_dir.chmod(int("500", 8))
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_removes_dir_and_readonly_file(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_file = readonly_dir.join('readonlyfile').ensure()
readonly_file.chmod(0)
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
l = []
monkeypatch.setattr(py.std.shutil, 'rmtree',
lambda *args, **kwargs: l.append(kwargs))
tmpdir.remove()
assert not l[0]['ignore_errors']
for val in (True, False):
l[:] = []
tmpdir.remove(ignore_errors=val)
assert l[0]['ignore_errors'] == val
def test_initialize_curdir(self):
assert str(local()) == py.std.os.getcwd()
@skiponwin32
def test_chdir_gone(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
pytest.raises(py.error.ENOENT, py.path.local)
assert path1.chdir() is None
assert os.getcwd() == str(path1)
def test_as_cwd(self, path1):
dir = path1.ensure("subdir", dir=1)
old = py.path.local()
with dir.as_cwd() as x:
assert x == old
assert py.path.local() == dir
assert os.getcwd() == str(old)
def test_as_cwd_exception(self, path1):
old = py.path.local()
dir = path1.ensure("subdir", dir=1)
with pytest.raises(ValueError):
with dir.as_cwd():
raise ValueError()
assert old == py.path.local()
def test_initialize_reldir(self, path1):
with path1.as_cwd():
p = local('samplefile')
assert p.check()
@pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
def test_tilde_expansion(self, monkeypatch, tmpdir):
monkeypatch.setenv("HOME", str(tmpdir))
p = py.path.local("~", expanduser=True)
assert p == os.path.expanduser("~")
def test_eq_with_strings(self, path1):
path1 = path1.join('sampledir')
path2 = str(path1)
assert path1 == path2
assert path2 == path1
path3 = path1.join('samplefile')
assert path3 != path2
assert path2 != path3
def test_eq_with_none(self, path1):
assert path1 != None
def test_gt_with_strings(self, path1):
path2 = path1.join('sampledir')
path3 = str(path1.join("ttt"))
assert path3 > path2
assert path2 < path3
assert path2 < "ttt"
assert "ttt" > path2
path4 = path1.join("aaa")
l = [path2, path4,path3]
assert sorted(l) == [path4, path2, path3]
def test_open_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
with p.open("w", ensure=1) as f:
f.write("hello")
assert p.read() == "hello"
def test_write_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
p.write("hello", ensure=1)
assert p.read() == "hello"
@py.test.mark.multi(bin=(False, True))
def test_dump(self, tmpdir, bin):
path = tmpdir.join("dumpfile%s" % int(bin))
        f = None
        try:
            d = {'answer' : 42}
            path.dump(d, bin=bin)
            f = path.open('rb+')
            dnew = py.std.pickle.load(f)
            assert d == dnew
        finally:
            if f is not None:
                f.close()
@failsonjywin32
def test_setmtime(self):
import tempfile
import time
try:
fd, name = tempfile.mkstemp()
py.std.os.close(fd)
except AttributeError:
name = tempfile.mktemp()
open(name, 'w').close()
try:
mtime = int(time.time())-100
path = local(name)
assert path.mtime() != mtime
path.setmtime(mtime)
assert path.mtime() == mtime
path.setmtime()
assert path.mtime() != mtime
finally:
py.std.os.remove(name)
def test_normpath(self, path1):
new1 = path1.join("/otherdir")
new2 = path1.join("otherdir")
assert str(new1) == str(new2)
def test_mkdtemp_creation(self):
d = local.mkdtemp()
try:
assert d.check(dir=1)
finally:
d.remove(rec=1)
def test_tmproot(self):
d = local.mkdtemp()
tmproot = local.get_temproot()
try:
assert d.check(dir=1)
assert d.dirpath() == tmproot
finally:
d.remove(rec=1)
def test_chdir(self, tmpdir):
old = local()
try:
res = tmpdir.chdir()
assert str(res) == str(old)
assert py.std.os.getcwd() == str(tmpdir)
finally:
old.chdir()
def test_ensure_filepath_withdir(self, tmpdir):
newfile = tmpdir.join('test1','test')
newfile.ensure()
assert newfile.check(file=1)
newfile.write("42")
newfile.ensure()
s = newfile.read()
assert s == "42"
def test_ensure_filepath_withoutdir(self, tmpdir):
newfile = tmpdir.join('test1file')
t = newfile.ensure()
assert t == newfile
assert newfile.check(file=1)
def test_ensure_dirpath(self, tmpdir):
newfile = tmpdir.join('test1','testfile')
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_init_from_path(self, tmpdir):
l = local()
l2 = local(l)
assert l2 == l
wc = py.path.svnwc('.')
l3 = local(wc)
assert l3 is not wc
assert l3.strpath == wc.strpath
assert not hasattr(l3, 'commit')
    @py.test.mark.xfail(run=False, reason="unreliable test for long filenames")
def test_long_filenames(self, tmpdir):
if sys.platform == "win32":
py.test.skip("win32: work around needed for path length limit")
# see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
# testing paths > 260 chars (which is Windows' limitation, but
# depending on how the paths are used), but > 4096 (which is the
# Linux' limitation) - the behaviour of paths with names > 4096 chars
# is undetermined
newfilename = '/test' * 60
l = tmpdir.join(newfilename)
l.ensure(file=True)
l.write('foo')
l2 = tmpdir.join(newfilename)
assert l2.read() == 'foo'
def test_visit_depth_first(self, tmpdir):
p1 = tmpdir.ensure("a","1")
p2 = tmpdir.ensure("b","2")
p3 = tmpdir.ensure("breadth")
l = list(tmpdir.visit(lambda x: x.check(file=1)))
assert len(l) == 3
# check that breadth comes last
assert l[2] == p3
def test_visit_rec_fnmatch(self, tmpdir):
p1 = tmpdir.ensure("a","123")
p2 = tmpdir.ensure(".b","345")
l = list(tmpdir.visit("???", rec="[!.]*"))
assert len(l) == 1
        # check that the dot-prefixed directory was not recursed into
assert l[0] == p1
def test_fnmatch_file_abspath(self, tmpdir):
b = tmpdir.join("a", "b")
assert b.fnmatch(os.sep.join("ab"))
pattern = os.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
def test_sysfind(self):
name = sys.platform == "win32" and "cmd" or "test"
x = py.path.local.sysfind(name)
assert x.check(file=1)
assert py.path.local.sysfind('jaksdkasldqwe') is None
assert py.path.local.sysfind(name, paths=[]) is None
x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
assert x2 == x
class TestExecutionOnWindows:
pytestmark = win32only
def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
tmpdir.ensure("hello")
h = tmpdir.ensure("hello.bat")
x = py.path.local.sysfind("hello")
assert x == h
class TestExecution:
pytestmark = skiponwin32
def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
noperm = tmpdir.ensure('noperm', dir=True)
monkeypatch.setenv("PATH", noperm, prepend=":")
noperm.chmod(0)
assert py.path.local.sysfind('jaksdkasldqwe') is None
def test_sysfind_absolute(self):
x = py.path.local.sysfind('test')
assert x.check(file=1)
y = py.path.local.sysfind(str(x))
assert y.check(file=1)
assert y == x
def test_sysfind_multiple(self, tmpdir, monkeypatch):
monkeypatch.setenv('PATH',
"%s:%s" % (tmpdir.ensure('a'),
tmpdir.join('b')),
prepend=":")
tmpdir.ensure('b', 'a')
checker = lambda x: x.dirpath().basename == 'b'
x = py.path.local.sysfind('a', checker=checker)
assert x.basename == 'a'
assert x.dirpath().basename == 'b'
checker = lambda x: None
assert py.path.local.sysfind('a', checker=checker) is None
def test_sysexec(self):
x = py.path.local.sysfind('ls')
out = x.sysexec('-a')
for x in py.path.local().listdir():
assert out.find(x.basename) != -1
def test_sysexec_failing(self):
x = py.path.local.sysfind('false')
py.test.raises(py.process.cmdexec.Error, """
x.sysexec('aksjdkasjd')
""")
def test_make_numbered_dir(self, tmpdir):
tmpdir.ensure('base.not_an_int', dir=1)
for i in range(10):
numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
keep=2, lock_timeout=0)
assert numdir.check()
assert numdir.basename == 'base.%d' %i
if i>=1:
assert numdir.new(ext=str(i-1)).check()
if i>=2:
assert numdir.new(ext=str(i-2)).check()
if i>=3:
assert not numdir.new(ext=str(i-3)).check()
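    # Behavioral summary of the assertions above: with keep=2 and a shared
    # prefix, make_numbered_dir garbage-collects all but the two most recent
    # numbered directories.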
def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
def notimpl(x, y):
raise NotImplementedError(42)
monkeypatch.setattr(py.std.os, 'symlink', notimpl)
x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
assert x.relto(tmpdir)
assert x.check()
def test_locked_make_numbered_dir(self, tmpdir):
for i in range(10):
numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
keep=2)
assert numdir.check()
assert numdir.basename == 'base2.%d' %i
for j in range(i):
assert numdir.new(ext=str(j)).check()
def test_error_preservation(self, path1):
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
#def test_parentdirmatch(self):
# local.parentdirmatch('std', startmodule=__name__)
#
class TestImport:
def test_pyimport(self, path1):
obj = path1.join('execfile.py').pyimport()
assert obj.x == 42
assert obj.__name__ == 'execfile'
def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
p = tmpdir.ensure("a", "test_x123.py")
p.pyimport()
tmpdir.join("a").move(tmpdir.join("b"))
pytest.raises(tmpdir.ImportMismatchError,
lambda: tmpdir.join("b", "test_x123.py").pyimport())
def test_pyimport_messy_name(self, tmpdir):
# http://bitbucket.org/hpk42/py-trunk/issue/129
path = tmpdir.ensure('foo__init__.py')
obj = path.pyimport()
def test_pyimport_dir(self, tmpdir):
p = tmpdir.join("hello_123")
p_init = p.ensure("__init__.py")
m = p.pyimport()
assert m.__name__ == "hello_123"
m = p_init.pyimport()
assert m.__name__ == "hello_123"
def test_pyimport_execfile_different_name(self, path1):
obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
assert obj.x == 42
assert obj.__name__ == '0x.y.z'
def test_pyimport_a(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('a.py').pyimport()
assert mod.result == "got it"
assert mod.__name__ == 'otherdir.a'
def test_pyimport_b(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('b.py').pyimport()
assert mod.stuff == "got it"
assert mod.__name__ == 'otherdir.b'
def test_pyimport_c(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('c.py').pyimport()
assert mod.value == "got it"
def test_pyimport_d(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('d.py').pyimport()
assert mod.value2 == "got it"
def test_pyimport_and_import(self, tmpdir):
tmpdir.ensure('xxxpackage', '__init__.py')
mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
mod1 = mod1path.pyimport()
assert mod1.__name__ == 'xxxpackage.module1'
from xxxpackage import module1
assert module1 is mod1
def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
name = 'pointsback123'
ModuleType = type(py.std.os)
p = tmpdir.ensure(name + '.py')
for ending in ('.pyc', '$py.class', '.pyo'):
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+ending)
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
newmod = p.pyimport()
assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+"123.py")
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
excinfo = py.test.raises(pseudopath.ImportMismatchError,
"p.pyimport()")
modname, modfile, orig = excinfo.value.args
assert modname == name
assert modfile == pseudopath
assert orig == p
assert issubclass(pseudopath.ImportMismatchError, ImportError)
def test_issue131_pyimport_on__init__(self, tmpdir):
# __init__.py files may be namespace packages, and thus the
# __file__ of an imported module may not be ourselves
# see issue
p1 = tmpdir.ensure("proja", "__init__.py")
p2 = tmpdir.ensure("sub", "proja", "__init__.py")
m1 = p1.pyimport()
m2 = p2.pyimport()
assert m1 == m2
def test_ensuresyspath_append(self, tmpdir):
root1 = tmpdir.mkdir("root1")
file1 = root1.ensure("x123.py")
assert str(root1) not in sys.path
file1.pyimport(ensuresyspath="append")
assert str(root1) == sys.path[-1]
assert str(root1) not in sys.path[:-1]
def test_pypkgdir(tmpdir):
pkg = tmpdir.ensure('pkg1', dir=1)
pkg.ensure("__init__.py")
pkg.ensure("subdir/__init__.py")
assert pkg.pypkgpath() == pkg
assert pkg.join('subdir', '__init__.py').pypkgpath() == pkg
def test_pypkgdir_unimportable(tmpdir):
pkg = tmpdir.ensure('pkg1-1', dir=1) # unimportable
pkg.ensure("__init__.py")
subdir = pkg.ensure("subdir/__init__.py").dirpath()
assert subdir.pypkgpath() == subdir
assert subdir.ensure("xyz.py").pypkgpath() == subdir
assert not pkg.pypkgpath()
def test_isimportable():
from py._path.local import isimportable
assert not isimportable("")
assert isimportable("x")
assert isimportable("x1")
assert isimportable("x_1")
assert isimportable("_")
assert isimportable("_1")
assert not isimportable("x-1")
assert not isimportable("x:1")
def test_homedir_from_HOME(monkeypatch):
path = os.getcwd()
monkeypatch.setenv("HOME", path)
assert py.path.local._gethomedir() == py.path.local(path)
def test_homedir_not_exists(monkeypatch):
monkeypatch.delenv("HOME", raising=False)
monkeypatch.delenv("HOMEDRIVE", raising=False)
homedir = py.path.local._gethomedir()
assert homedir is None
def test_samefile(tmpdir):
assert tmpdir.samefile(tmpdir)
p = tmpdir.ensure("hello")
assert p.samefile(p)
with p.dirpath().as_cwd():
assert p.samefile(p.basename)
if sys.platform == "win32":
p1 = p.__class__(str(p).lower())
p2 = p.__class__(str(p).upper())
assert p1.samefile(p2)
def test_listdir_single_arg(tmpdir):
tmpdir.ensure("hello")
assert tmpdir.listdir("hello")[0].basename == "hello"
def test_mkdtemp_rootdir(tmpdir):
dtmp = local.mkdtemp(rootdir=tmpdir)
assert tmpdir.listdir() == [dtmp]
class TestWINLocalPath:
pytestmark = win32only
def test_owner_group_not_implemented(self, path1):
py.test.raises(NotImplementedError, "path1.stat().owner")
py.test.raises(NotImplementedError, "path1.stat().group")
def test_chmod_simple_int(self, path1):
py.builtin.print_("path1 is", path1)
mode = path1.stat().mode
# Ensure that we actually change the mode to something different.
path1.chmod(mode == 0 and 1 or 0)
try:
print(path1.stat().mode)
print(mode)
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_path_comparison_lowercase_mixed(self, path1):
t1 = path1.join("a_path")
t2 = path1.join("A_path")
assert t1 == t1
assert t1 == t2
def test_relto_with_mixed_case(self, path1):
t1 = path1.join("a_path", "fiLe")
t2 = path1.join("A_path")
assert t1.relto(t2) == "fiLe"
def test_allow_unix_style_paths(self, path1):
t1 = path1.join('a_path')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('a_path/')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('dir/a_path')
assert t1 == str(path1) + '\\dir\\a_path'
def test_sysfind_in_currentdir(self, path1):
cmd = py.path.local.sysfind('cmd')
root = cmd.new(dirname='', basename='') # c:\ in most installations
with root.as_cwd():
x = py.path.local.sysfind(cmd.relto(root))
assert x.check(file=1)
def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
# path-matching patterns might contain a posix path separator '/'
# Test that we can match that pattern on windows.
import posixpath
b = tmpdir.join("a", "b")
assert b.fnmatch(posixpath.sep.join("ab"))
pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
class TestPOSIXLocalPath:
pytestmark = skiponwin32
def test_hardlink(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
nlink = filepath.stat().nlink
linkpath.mklinkto(filepath)
assert filepath.stat().nlink == nlink + 1
def test_symlink_are_identical(self, tmpdir):
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(filepath)
assert linkpath.readlink() == str(filepath)
def test_symlink_isfile(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
assert linkpath.check(file=1)
assert not linkpath.check(link=0, file=1)
assert linkpath.islink()
def test_symlink_relative(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath.mksymlinkto(filepath, absolute=False)
assert linkpath.readlink() == "file"
assert filepath.read() == linkpath.read()
def test_symlink_not_existing(self, tmpdir):
linkpath = tmpdir.join('testnotexisting')
assert not linkpath.check(link=1)
assert linkpath.check(link=0)
def test_relto_with_root(self, path1, tmpdir):
y = path1.join('x').relto(py.path.local('/'))
assert y[0] == str(path1)[1]
def test_visit_recursive_symlink(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
visitor = tmpdir.visit(None, lambda x: x.check(link=0))
assert list(visitor) == [linkpath]
def test_symlink_isdir(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
assert linkpath.check(dir=1)
assert not linkpath.check(link=0, dir=1)
def test_symlink_remove(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(linkpath) # point to itself
assert linkpath.check(link=1)
linkpath.remove()
assert not linkpath.check()
def test_realpath_file(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
realpath = linkpath.realpath()
assert realpath.basename == 'file'
def test_owner(self, path1, tmpdir):
from pwd import getpwuid
from grp import getgrgid
stat = path1.stat()
assert stat.path == path1
uid = stat.uid
gid = stat.gid
owner = getpwuid(uid)[0]
group = getgrgid(gid)[0]
assert uid == stat.uid
assert owner == stat.owner
assert gid == stat.gid
assert group == stat.group
def test_stat_helpers(self, tmpdir, monkeypatch):
path1 = tmpdir.ensure("file")
stat1 = path1.stat()
stat2 = tmpdir.stat()
assert stat1.isfile()
assert stat2.isdir()
assert not stat1.islink()
assert not stat2.islink()
def test_stat_non_raising(self, tmpdir):
path1 = tmpdir.join("file")
pytest.raises(py.error.ENOENT, lambda: path1.stat())
res = path1.stat(raising=False)
assert res is None
def test_atime(self, tmpdir):
import time
path = tmpdir.ensure('samplefile')
now = time.time()
atime1 = path.atime()
# we could wait here but timer resolution is very
# system dependent
path.read()
time.sleep(0.01)
atime2 = path.atime()
time.sleep(0.01)
duration = time.time() - now
assert (atime2-atime1) <= duration
def test_commondir(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = path1.join('otherthing')
assert p1.common(p2) == path1
assert p2.common(p1) == path1
def test_commondir_nocommon(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = py.path.local(path1.sep+'blabla')
assert p1.common(p2) == '/'
def test_join_to_root(self, path1):
root = path1.parts()[0]
assert len(str(root)) == 1
assert str(root.join('a')) == '//a' # posix allows two slashes
def test_join_root_to_root_with_no_abs(self, path1):
nroot = path1.join('/')
assert str(path1) == str(nroot)
assert path1 == nroot
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
path1.chmod(int(mode/2))
try:
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_chmod_rec_int(self, path1):
# XXX fragile test
recfilter = lambda x: x.check(dotfile=0, link=0)
oldmodes = {}
for x in path1.visit(rec=recfilter):
oldmodes[x] = x.stat().mode
path1.chmod(int("772", 8), rec=recfilter)
try:
for x in path1.visit(rec=recfilter):
assert x.stat().mode & int("777", 8) == int("772", 8)
finally:
for x,y in oldmodes.items():
x.chmod(y)
def test_copy_archiving(self, tmpdir):
unicode_fn = u"something-\342\200\223.txt"
f = tmpdir.ensure("a", unicode_fn)
a = f.dirpath()
oldmode = f.stat().mode
newmode = oldmode ^ 1
f.chmod(newmode)
b = tmpdir.join("b")
a.copy(b, mode=True)
assert b.join(f.basename).stat().mode == newmode
@failsonjython
def test_chown_identity(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
@failsonjython
def test_chown_dangling_link(self, path1):
owner = path1.stat().owner
group = path1.stat().group
x = path1.join('hello')
x.mksymlinkto('qlwkejqwlek')
try:
path1.chown(owner, group, rec=1)
finally:
x.remove(rec=0)
@failsonjython
def test_chown_identity_rec_mayfail(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
class TestUnicodePy2Py3:
def test_join_ensure(self, tmpdir, monkeypatch):
if sys.version_info >= (3,0) and "LANG" not in os.environ:
pytest.skip("cannot run test without locale")
x = py.path.local(tmpdir.strpath)
part = "hällo"
y = x.ensure(part)
assert x.join(part) == y
def test_listdir(self, tmpdir):
if sys.version_info >= (3,0) and "LANG" not in os.environ:
pytest.skip("cannot run test without locale")
x = py.path.local(tmpdir.strpath)
part = "hällo"
y = x.ensure(part)
assert x.listdir(part)[0] == y
@pytest.mark.xfail(reason="changing read/write might break existing usages")
def test_read_write(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
x.write(part)
assert x.read() == part
x.write(part.encode(sys.getdefaultencoding()))
assert x.read() == part.encode(sys.getdefaultencoding())
class TestBinaryAndTextMethods:
def test_read_binwrite(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
part_utf8 = part.encode("utf8")
x.write_binary(part_utf8)
assert x.read_binary() == part_utf8
s = x.read_text(encoding="utf8")
assert s == part
assert py.builtin._istext(s)
def test_read_textwrite(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
part_utf8 = part.encode("utf8")
x.write_text(part, encoding="utf8")
assert x.read_binary() == part_utf8
assert x.read_text(encoding="utf8") == part
def test_default_encoding(self, tmpdir):
x = tmpdir.join("hello")
        # Can't use UTF-8 because the default encoding (ASCII) doesn't support it
part = py.builtin._totext("hello", "ascii")
x.write_text(part, "ascii")
s = x.read_text("ascii")
assert s == part
assert type(s) == type(part)
| mpl-2.0 |
MwanzanFelipe/rockletonfortune | lib/django/contrib/gis/geoip2/base.py | 335 | 9054 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
        # GeoIP2 only takes IP addresses, so resolve a hostname query to its
        # IP address before returning the query to the caller.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
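
# A minimal usage sketch (the directory below is hypothetical; GEOIP_PATH
# normally points at the directory holding the GeoLite2 .mmdb files):
#
#   g = GeoIP2(path='/usr/local/share/GeoIP')
#   g.country('djangoproject.com')  # -> {'country_code': ..., 'country_name': ...}
#   g.city('24.124.1.80')           # -> city record dict; some values may be None
#   g.lat_lon('24.124.1.80')        # -> (latitude, longitude)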
| bsd-3-clause |
Asquera/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Debug.py | 61 | 6766 | """SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import sys
import time
import weakref
tracked_classes = {}
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if name not in tracked_classes:
tracked_classes[name] = []
tracked_classes[name].append(weakref.ref(instance))
def string_to_classes(s):
if s == '*':
return sorted(tracked_classes.keys())
else:
return s.split()
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return [(cn, len(tracked_classes[cn])) for cn in classnames]
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.items():
file.write(' %20s : %s\n' % (key, value))
if sys.platform[:5] == "linux":
# Linux doesn't actually support memory usage stats from getrusage().
def memory():
mstr = open('/proc/self/stat').read()
mstr = mstr.split()[22]
return int(mstr)
elif sys.platform[:6] == 'darwin':
#TODO really get memory stats for OS X
def memory():
return 0
else:
try:
import resource
except ImportError:
try:
import win32process
import win32api
except ImportError:
def memory():
return 0
else:
def memory():
process_handle = win32api.GetCurrentProcess()
memory_info = win32process.GetProcessMemoryInfo( process_handle )
return memory_info['PeakWorkingSetSize']
else:
def memory():
res = resource.getrusage(resource.RUSAGE_SELF)
return res[4]
# returns caller's stack
def caller_stack(*backlist):
import traceback
if not backlist:
backlist = [0]
result = []
for back in backlist:
tb = traceback.extract_stack(limit=3+back)
key = tb[0][:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result
caller_bases = {}
caller_dicts = {}
# trace a caller's stack
def caller_trace(back=0):
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
leader = ' '*level
for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if c in caller_dicts:
_dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
for k in sorted(caller_bases.keys()):
file.write("Callers of %s:%d(%s), %d calls:\n"
% (func_shorten(k) + (caller_bases[k],)))
_dump_one_caller(k, file)
shorten_list = [
( '/scons/SCons/', 1),
( '/src/engine/SCons/', 1),
( '/usr/lib/python', 0),
]
if os.sep != '/':
shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]
def func_shorten(func_tuple):
f = func_tuple[0]
for t in shorten_list:
i = f.find(t[0])
if i >= 0:
if t[1]:
i = i + len(t[0])
return (f[i:],)+func_tuple[1:]
return func_tuple
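# For example, with the default shorten_list on a '/' path separator:
#   func_shorten(('/home/me/src/engine/SCons/Debug.py', 42, 'Trace'))
#     -> ('Debug.py', 42, 'Trace')      # cut *after* the match (t[1] == 1)
#   func_shorten(('/opt/usr/lib/python2.7/threading.py', 585, 'run'))
#     -> ('/usr/lib/python2.7/threading.py', 585, 'run')  # cut *at* the match (t[1] == 0)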
TraceFP = {}
if sys.platform == 'win32':
TraceDefault = 'con'
else:
TraceDefault = '/dev/tty'
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime
def Trace(msg, file=None, mode='w', tstamp=None):
"""Write a trace message to a file. Whenever a file is specified,
it becomes the default for the next call to Trace()."""
global TraceDefault
global TimeStampDefault
global PreviousTime
if file is None:
file = TraceDefault
else:
TraceDefault = file
if tstamp is None:
tstamp = TimeStampDefault
else:
TimeStampDefault = tstamp
try:
fp = TraceFP[file]
except KeyError:
try:
fp = TraceFP[file] = open(file, mode)
except TypeError:
# Assume we were passed an open file pointer.
fp = file
if tstamp:
now = time.time()
fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
PreviousTime = now
fp.write(msg)
fp.flush()
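
# A minimal usage sketch of Trace (the log path is hypothetical); the first
# call sets the default trace file, and later calls without `file` reuse it:
#   Trace('build started\n', file='/tmp/scons-trace.log', tstamp=1)
#   Trace('still going\n')   # timestamped, written to the same open file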
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
RohitDas/cubeproject | lib/django/contrib/gis/geos/geometry.py | 82 | 24691 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
def __init__(self, geo_input, srid=None):
"""
        The base constructor for GEOS geometry objects. It may take the
        following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
                raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
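    # For example (a sketch; the WKT literals are illustrative):
    #   g1 = GEOSGeometry('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')
    #   g2 = GEOSGeometry('POLYGON((1 1, 1 3, 3 3, 3 1, 1 1))')
    #   g1 | g2   # union; likewise g1 & g2, g1 - g2 and g1 ^ g2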
# #### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
# #### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve, a point and an area or a line
        and an area) or 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T***T** (for two points or two surfaces) or 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
# #### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0:
return None
else:
return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
# #### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
are only included in this representation if GEOS >= 3.3.0.
"""
if self.get_srid():
return 'SRID=%s;%s' % (self.srid, self.wkt)
else:
return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w(3 if self.hasz else 2).write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(3 if self.hasz else 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
        extension of the WKB specification that includes the SRID value that
        is a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(3 if self.hasz else 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
        This is an extension of the WKB specification that includes any SRID
        value that is a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
return PreparedGeometry(self)
# #### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
if self.srid:
try:
return gdal.OGRGeometry(self.wkb, self.srid)
except SRSException:
pass
return gdal.OGRGeometry(self.wkb)
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to return a SpatialReference object.')
if self.srid:
try:
return gdal.SpatialReference(self.srid)
except SRSException:
pass
return None
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
        transformation object, which may be an integer SRID, a WKT string, or
        a PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
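    # A usage sketch (the SRIDs are illustrative): for a geometry with
    # srid=4326, geom.transform(3857) reprojects it in place to Web Mercator,
    # while geom.transform(3857, clone=True) returns a transformed copy and
    # leaves the original untouched.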
# #### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
        Returns the Geometry, simplified using the Douglas-Peucker algorithm
        to the specified tolerance (higher tolerance => fewer points). If no
        tolerance is provided, it defaults to 0.
        By default, this function does not preserve topology - e.g., polygons
        can be split, collapse to lines or disappear, holes can be created or
        disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
# #### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing # isort:skip
from django.contrib.gis.geos.point import Point # isort:skip
from django.contrib.gis.geos.polygon import Polygon # isort:skip
from django.contrib.gis.geos.collections import ( # isort:skip
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
from django.contrib.gis.geos.prepared import PreparedGeometry # isort:skip
GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
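
# A construction sketch showing the accepted input forms (the coordinates and
# hex string are illustrative):
#   GEOSGeometry('POINT(-95.3385 29.7245)')                    # WKT
#   GEOSGeometry('SRID=4326;POINT(-95.3385 29.7245)')          # EWKT
#   GEOSGeometry('0101000000...')                              # HEXEWKB
#   GEOSGeometry('{"type": "Point", "coordinates": [-95.3, 29.7]}')  # GeoJSON (requires GDAL)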
| bsd-3-clause |
williamFalcon/pytorch-lightning | tests/trainer/test_dataloaders.py | 1 | 9641 | import pytest
import tests.models.utils as tutils
from pytorch_lightning import Trainer
from tests.models import (
TestModelBase,
LightningTestModel,
LightEmptyTestStep,
LightValidationMultipleDataloadersMixin,
LightTestMultipleDataloadersMixin,
LightTestFitSingleTestDataloadersMixin,
LightTestFitMultipleTestDataloadersMixin,
LightValStepFitMultipleDataloadersMixin,
LightValStepFitSingleDataloaderMixin,
LightTrainDataloader,
)
from pytorch_lightning.utilities.debugging import MisconfigurationException
def test_multiple_val_dataloader(tmpdir):
"""Verify multiple val_dataloader."""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValidationMultipleDataloadersMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=1.0,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# verify training completed
assert result == 1
# verify there are 2 val loaders
assert len(trainer.val_dataloaders) == 2, \
'Multiple val_dataloaders not initiated properly'
# make sure predictions are good for each val set
for dataloader in trainer.val_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
def test_multiple_test_dataloader(tmpdir):
"""Verify multiple test_dataloader."""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightTestMultipleDataloadersMixin,
LightEmptyTestStep,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model)
trainer.test()
# verify there are 2 val loaders
assert len(trainer.test_dataloaders) == 2, \
'Multiple test_dataloaders not initiated properly'
# make sure predictions are good for each test set
for dataloader in trainer.test_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
# run the test method
trainer.test()
def test_train_dataloaders_passed_to_fit(tmpdir):
""" Verify that train dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(LightTrainDataloader, TestModelBase):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# only train passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True))
results = trainer.fit(model, **fit_options)
def test_train_val_dataloaders_passed_to_fit(tmpdir):
""" Verify that train & val dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, val passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
assert len(trainer.val_dataloaders) == 1, \
f"`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
def test_all_dataloaders_passed_to_fit(tmpdir):
""" Verify train, val & test dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
LightTestFitSingleTestDataloadersMixin,
LightEmptyTestStep,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, val and test passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=model._dataloader(train=False),
test_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 1, \
f"val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 1, \
f"test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_multiple_dataloaders_passed_to_fit(tmpdir):
"""Verify that multiple val & test dataloaders can be passed to fit."""
tutils.reset_seed()
class CurrentTestModel(
LightningTestModel,
LightValStepFitMultipleDataloadersMixin,
LightTestFitMultipleTestDataloadersMixin,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, multiple val and multiple test passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=[model._dataloader(train=False),
model._dataloader(train=False)],
test_dataloaders=[model._dataloader(train=False),
model._dataloader(train=False)])
results = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 2, \
f"Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 2, \
f"Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_mixing_of_dataloader_options(tmpdir):
"""Verify that dataloaders can be passed to fit"""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
LightTestFitSingleTestDataloadersMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
fit_options = dict(val_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
# fit model
trainer = Trainer(**trainer_options)
fit_options = dict(val_dataloaders=model._dataloader(train=False),
test_dataloaders=model._dataloader(train=False))
_ = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 1, \
f"`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 1, \
f"test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_inf_train_dataloader(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
tutils.reset_seed()
class CurrentTestModel(LightningTestModel):
def train_dataloader(self):
dataloader = self._dataloader(train=True)
class CustomInfDataLoader:
def __init__(self, dataloader):
self.dataloader = dataloader
self.iter = iter(dataloader)
self.count = 0
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count >= 5:
raise StopIteration
self.count = self.count + 1
try:
return next(self.iter)
except StopIteration:
self.iter = iter(self.dataloader)
return next(self.iter)
return CustomInfDataLoader(dataloader)
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# fit model
with pytest.raises(MisconfigurationException):
trainer = Trainer(
default_save_path=tmpdir,
max_epochs=1,
val_check_interval=0.5
)
trainer.fit(model)
# logger file to get meta
trainer = Trainer(
default_save_path=tmpdir,
max_epochs=1,
val_check_interval=50,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
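    # Note: with an iterable/infinite dataset the epoch length is unknown, so
    # a fractional val_check_interval (a share of the epoch, e.g. 0.5) cannot
    # be resolved and raises MisconfigurationException, while an integer
    # interval (validate every N batches) works.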
| apache-2.0 |
ModdedPA/android_external_chromium_org | tools/deep_memory_profiler/tests/mock_gsutil.py | 131 | 1558 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import zipfile
def main():
ZIP_PATTERN = re.compile('dmprof......\.zip')
assert len(sys.argv) == 6
assert sys.argv[1] == 'cp'
assert sys.argv[2] == '-a'
assert sys.argv[3] == 'public-read'
assert ZIP_PATTERN.match(os.path.basename(sys.argv[4]))
assert sys.argv[5] == 'gs://test-storage/'
zip_file = zipfile.ZipFile(sys.argv[4], 'r')
expected_nameset = set(['heap.01234.0001.heap',
'heap.01234.0002.heap',
'heap.01234.0001.buckets',
'heap.01234.0002.buckets',
'heap.01234.symmap/maps',
'heap.01234.symmap/chrome.uvwxyz.readelf-e',
'heap.01234.symmap/chrome.abcdef.nm',
'heap.01234.symmap/files.json'])
assert set(zip_file.namelist()) == expected_nameset
heap_1 = zip_file.getinfo('heap.01234.0001.heap')
assert heap_1.CRC == 763099253
assert heap_1.file_size == 1107
buckets_1 = zip_file.getinfo('heap.01234.0001.buckets')
assert buckets_1.CRC == 2632528901
assert buckets_1.file_size == 2146
nm_chrome = zip_file.getinfo('heap.01234.symmap/chrome.abcdef.nm')
assert nm_chrome.CRC == 2717882373
assert nm_chrome.file_size == 131049
zip_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
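
# The tests invoke this script as a stand-in for gsutil, e.g. (the zip name is
# illustrative but must match ZIP_PATTERN):
#   mock_gsutil.py cp -a public-read dmprofabc123.zip gs://test-storage/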
| bsd-3-clause |
3manuek/kubernetes | examples/cluster-dns/images/backend/server.py | 468 | 1313 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World!")
try:
# Create a web server and define the handler to manage the incoming request.
server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
    print 'Started httpserver on port', PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
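
# Once running, the server can be exercised with, e.g. (port per PORT_NUMBER):
#   curl http://localhost:8000/   # -> "Hello World!"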
| apache-2.0 |
MiniPlayer/log-island | logisland-plugins/logisland-scripting-processors-plugin/src/main/resources/nltk/test/doctest_nose_plugin.py | 28 | 6008 | # -*- coding: utf-8 -*-
from __future__ import print_function
from nose.suite import ContextList
import re
import sys
import os
import codecs
import doctest
from nose.plugins.base import Plugin
from nose.util import tolist, anyp
from nose.plugins.doctests import Doctest, log, DocFileCase
ALLOW_UNICODE = doctest.register_optionflag('ALLOW_UNICODE')
class _UnicodeOutputChecker(doctest.OutputChecker):
_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
def _remove_u_prefixes(self, txt):
return re.sub(self._literal_re, r'\1\2', txt)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
if not (optionflags & ALLOW_UNICODE):
return False
# ALLOW_UNICODE is active and want != got
cleaned_want = self._remove_u_prefixes(want)
cleaned_got = self._remove_u_prefixes(got)
res = doctest.OutputChecker.check_output(self, cleaned_want, cleaned_got, optionflags)
return res
_checker = _UnicodeOutputChecker()
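
# Illustration of the ALLOW_UNICODE semantics: once the flag is active, the
# patched checker strips u/U string prefixes from both sides, so a doctest
# expecting u'foo' also accepts 'foo' (and vice versa).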
class DoctestPluginHelper(object):
"""
This mixin adds print_function future import to all test cases.
It also adds support for:
'#doctest +ALLOW_UNICODE' option that
makes DocTestCase think u'foo' == 'foo'.
'#doctest doctestencoding=utf-8' option that
changes the encoding of doctest files
"""
OPTION_BY_NAME = ('doctestencoding',)
def loadTestsFromFileUnicode(self, filename):
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = codecs.open(filename, 'r', self.options.get('doctestencoding'))
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {'__file__': filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(
fixt_mod, globals(), locals(), ["nop"])
except ImportError as e:
log.debug(
"Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s",
fixt_mod, fixture_context)
if hasattr(fixture_context, 'globs'):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name,
filename=filename, lineno=0)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, 'setup_test', None),
tearDown=getattr(fixture_context, 'teardown_test', None),
result_var=self.doctest_result_var)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
def loadTestsFromFile(self, filename):
cases = self.loadTestsFromFileUnicode(filename)
for case in cases:
if isinstance(case, ContextList):
yield ContextList([self._patchTestCase(c) for c in case], case.context)
else:
yield self._patchTestCase(case)
def loadTestsFromModule(self, module):
"""Load doctests from the module.
"""
for suite in super(DoctestPluginHelper, self).loadTestsFromModule(module):
cases = [self._patchTestCase(case) for case in suite._get_tests()]
yield self.suiteClass(cases, context=module, can_split=False)
def _patchTestCase(self, case):
if case:
case._dt_test.globs['print_function'] = print_function
case._dt_checker = _checker
return case
def configure(self, options, config):
# it is overriden in order to fix doctest options discovery
Plugin.configure(self, options, config)
self.doctest_result_var = options.doctest_result_var
self.doctest_tests = options.doctest_tests
self.extension = tolist(options.doctestExtension)
self.fixtures = options.doctestFixtures
self.finder = doctest.DocTestFinder()
#super(DoctestPluginHelper, self).configure(options, config)
self.optionflags = 0
self.options = {}
if options.doctestOptions:
stroptions = ",".join(options.doctestOptions).split(',')
for stroption in stroptions:
try:
if stroption.startswith('+'):
self.optionflags |= doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
elif stroption.startswith('-'):
self.optionflags &= ~doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
try:
                        key, value = stroption.split('=')
                    except ValueError:
                        pass
                    else:
                        if key not in self.OPTION_BY_NAME:
                            raise ValueError()
                        self.options[key] = value
continue
except (AttributeError, ValueError, KeyError):
raise ValueError("Unknown doctest option {}".format(stroption))
else:
raise ValueError("Doctest option is not a flag or a key/value pair: {} ".format(stroption))
class DoctestFix(DoctestPluginHelper, Doctest):
pass
| apache-2.0 |
mpasternak/pyglet-fix-issue-552 | experimental/mt_media/drivers/directsound/__init__.py | 28 | 18147 | #!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
import lib_dsound as lib
from pyglet.window.win32 import _user32, _kernel32
class DirectSoundException(mt_media.MediaException):
pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
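
# With this formula (natural log scaled by 1000): _db(1.0) == 0,
# _db(0.5) == -693, and _db(0) == -10000 (DirectSound's silence floor).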
class DirectSoundWorker(mt_media.MediaThread):
_min_write_size = 9600
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super(DirectSoundWorker, self).__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
if _debug:
print 'DirectSoundWorker run attempt acquire'
self.condition.acquire()
if _debug:
print 'DirectSoundWorker run acquire'
if self.stopped:
self.condition.release()
break
sleep_time = -1
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if _debug:
print 'DirectSoundWorker run release'
if sleep_time != -1:
self.sleep(sleep_time)
if _debug:
print 'DirectSoundWorker exiting'
def add(self, player):
if _debug:
print 'DirectSoundWorker add', player
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker add', player
def remove(self, player):
if _debug:
print 'DirectSoundWorker remove', player
self.condition.acquire()
try:
self.players.remove(player)
except KeyError:
pass
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker remove', player
class DirectSoundAudioPlayer(mt_media.AbstractAudioPlayer):
# How many bytes the ring buffer should be
_buffer_size = 44800 * 1
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
def __init__(self, source_group, player):
super(DirectSoundAudioPlayer, self).__init__(source_group, player)
# Locking strategy:
# All DirectSound calls should be locked. All instance vars relating
# to buffering/filling/time/events should be locked (used by both
# application and worker thread). Other instance vars (consts and
# 3d vars) do not need to be locked.
self._lock = threading.RLock()
# Desired play state (may be actually paused due to underrun -- not
# implemented yet).
self._playing = False
# Up to one audio data may be buffered if too much data was received
# from the source that could not be written immediately into the
# buffer. See refill().
self._next_audio_data = None
# Theoretical write and play cursors for an infinite buffer. play
# cursor is always <= write cursor (when equal, underrun is
# happening).
self._write_cursor = 0
self._play_cursor = 0
# Cursor position of end of data. Silence is written after
# eos for one buffer size.
self._eos_cursor = None
# Indexes into DSound circular buffer. Complications ensue wrt each
# other to avoid writing over the play cursor. See get_write_size and
# write().
self._play_cursor_ring = 0
self._write_cursor_ring = 0
# List of (play_cursor, MediaEvent), in sort order
self._events = []
# List of (cursor, timestamp), in sort order (cursor gives expiry
# place of the timestamp)
self._timestamps = []
audio_format = source_group.audio_format
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
# DSound buffer
self._buffer = lib.IDirectSoundBuffer()
driver._dsound.CreateSoundBuffer(dsbdesc,
ctypes.byref(self._buffer),
None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer.SetCurrentPosition(0)
self.refill(self._buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if driver and driver.worker:
driver.worker.remove(self)
self.lock()
self._buffer.Stop()
self._buffer.Release()
self._buffer = None
if self._buffer3d:
self._buffer3d.Release()
self._buffer3d = None
self.unlock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def play(self):
if _debug:
print 'DirectSound play'
driver.worker.add(self)
self.lock()
if not self._playing:
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self.unlock()
if _debug:
print 'return DirectSound play'
def stop(self):
if _debug:
print 'DirectSound stop'
driver.worker.remove(self)
self.lock()
if self._playing:
self._playing = False
self._buffer.Stop()
self.unlock()
if _debug:
print 'return DirectSound stop'
def clear(self):
if _debug:
print 'DirectSound clear'
self.lock()
self._buffer.SetCurrentPosition(0)
self._play_cursor_ring = self._write_cursor_ring = 0
self._play_cursor = self._write_cursor
self._eos_cursor = None
self._next_audio_data = None
del self._events[:]
del self._timestamps[:]
self.unlock()
def refill(self, write_size):
self.lock()
while write_size > 0:
if _debug:
print 'refill, write_size =', write_size
# Get next audio packet (or remains of last one)
if self._next_audio_data:
audio_data = self._next_audio_data
self._next_audio_data = None
else:
audio_data = self.source_group.get_audio_data(write_size)
# Write it, or silence if there are no more packets
if audio_data:
# Add events
for event in audio_data.events:
event_cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_cursor, event))
# Add timestamp (at end of this data packet)
ts_cursor = self._write_cursor + audio_data.length
self._timestamps.append(
(ts_cursor, audio_data.timestamp + audio_data.duration))
# Write data
if _debug:
print 'write', audio_data.length
length = min(write_size, audio_data.length)
self.write(audio_data, length)
if audio_data.length:
self._next_audio_data = audio_data
write_size -= length
else:
# Write silence
if self._eos_cursor is None:
self._eos_cursor = self._write_cursor
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_eos')))
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_source_group_eos')))
self._events.sort()
if self._write_cursor > self._eos_cursor + self._buffer_size:
self.stop()
else:
self.write(None, write_size)
write_size = 0
self.unlock()
def update_play_cursor(self):
self.lock()
play_cursor_ring = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor_ring, None)
if play_cursor_ring.value < self._play_cursor_ring:
# Wrapped around
self._play_cursor += self._buffer_size - self._play_cursor_ring
self._play_cursor_ring = 0
self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
self._play_cursor_ring = play_cursor_ring.value
# Dispatch pending events
pending_events = []
while self._events and self._events[0][0] <= self._play_cursor:
_, event = self._events.pop(0)
pending_events.append(event)
if _debug:
print 'Dispatching pending events:', pending_events
print 'Remaining events:', self._events
# Remove expired timestamps
while self._timestamps and self._timestamps[0][0] < self._play_cursor:
del self._timestamps[0]
self.unlock()
for event in pending_events:
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
self.update_play_cursor()
self.lock()
play_cursor = self._play_cursor
write_cursor = self._write_cursor
self.unlock()
return self._buffer_size - (write_cursor - play_cursor)
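    # For example, with _buffer_size == 44800, a play cursor at 100000 and a
    # write cursor at 130000, this returns 44800 - 30000 == 14800: the number
    # of ring-buffer bytes that can be filled without overtaking playback.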
def write(self, audio_data, length):
# Pass audio_data=None to write silence
if length == 0:
return 0
self.lock()
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
self._buffer.Lock(self._write_cursor_ring, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
assert length == l1.value + l2.value
if audio_data:
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.source_group.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.source_group.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor_ring += length
self._write_cursor_ring %= self._buffer_size
self.unlock()
def get_time(self):
self.lock()
if self._timestamps:
cursor, ts = self._timestamps[0]
result = ts + (self._play_cursor - cursor) / \
float(self.source_group.audio_format.bytes_per_second)
else:
result = None
self.unlock()
return result
def set_volume(self, volume):
volume = _db(volume)
self.lock()
self._buffer.SetVolume(volume)
self.unlock()
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self.lock()
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_min_distance(self, min_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_max_distance(self, max_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_pitch(self, pitch):
        frequency = int(pitch * self.source_group.audio_format.sample_rate)
self.lock()
self._buffer.SetFrequency(frequency)
self.unlock()
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self.lock()
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self.lock()
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self.lock()
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
self.unlock()
class DirectSoundDriver(mt_media.AbstractAudioDriver):
def __init__(self):
self._dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)
        # A trick used by mplayer: use the desktop as the window handle, since it
# would be complex to use pyglet window handles (and what to do when
# application is audio only?).
hwnd = _user32.GetDesktopWindow()
self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
# Create primary buffer with 3D and volume capabilities
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
# Create listener
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
# Create worker thread
self.worker = DirectSoundWorker()
self.worker.start()
def __del__(self):
try:
    if self._buffer:
        self.delete()
except:
    # Swallow errors during garbage collection: interpreter teardown may
    # already have released the COM objects this cleanup touches.
    pass
def create_audio_player(self, source_group, player):
return DirectSoundAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
self._buffer.Release()
self._buffer = None
self._listener.Release()
self._listener = None
# Listener API
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
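# pyglet's 3D audio coordinates are right-handed while DirectSound's are
# left-handed, which is why the listener and buffer calls above negate z.
# A self-contained sketch of that conversion (illustrative names only):
def _rh_to_lh(x, y, z):
    # Flip the z axis to convert between right- and left-handed coordinates.
    return x, y, -z
assert _rh_to_lh(1.0, 2.0, 3.0) == (1.0, 2.0, -3.0)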
def create_audio_driver():
global driver
driver = DirectSoundDriver()
return driver
# Global driver needed for access to worker thread and _dsound
driver = None
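# The audio player above keeps two monotonically increasing byte cursors and
# derives free space and the physical ring position from them, mirroring
# get_write_size() and write(). A self-contained sketch of that arithmetic
# (parameter names are illustrative, not part of the DirectSound API):
def _ring_cursor_demo(buffer_size=16, play_cursor=10, write_cursor=22):
    free = buffer_size - (write_cursor - play_cursor)  # bytes safe to write
    ring_offset = write_cursor % buffer_size           # physical write position
    return free, ring_offset
assert _ring_cursor_demo() == (4, 6)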
| bsd-3-clause |
JCBarahona/edX | common/djangoapps/monkey_patch/django_utils_translation.py | 128 | 2819 | """
Monkey-patch `django.utils.translation` to not dump header info
Modify Django's translation module, such that the *gettext functions
always return an empty string when attempting to translate an empty
string. This overrides the default behavior [0]:
> It is convention with GNU gettext to include meta-data as the
> translation for the empty string.
Affected Methods:
- gettext
- ugettext
Note: The *ngettext and *pgettext functions are intentionally omitted,
as they already behave as expected. The *_lazy functions are implicitly
patched, as they wrap their nonlazy equivalents.
Django's translation module contains a good deal of indirection. For us
to patch the module with our own functions, we have to patch
`django.utils.translation._trans`. This ensures that the patched
behavior will still be used, even if code elsewhere caches a reference
to one of the translation functions. If you're curious, check out
Django's source code [1].
[0] https://docs.python.org/2.7/library/gettext.html#the-gnutranslations-class
[1] https://github.com/django/django/blob/1.4.8/django/utils/translation/__init__.py#L66
"""
from django.utils.translation import _trans as translation
import monkey_patch
ATTRIBUTES = [
'gettext',
'ugettext',
]
def is_patched():
"""
Check if the translation module has been monkey-patched
"""
patched = True
for attribute in ATTRIBUTES:
if not monkey_patch.is_patched(translation, attribute):
patched = False
break
return patched
def patch():
"""
Monkey-patch the translation functions
Affected Methods:
- gettext
- ugettext
"""
def decorate(function, message_default=u''):
"""
Decorate a translation function
Default message is a unicode string, but gettext overrides this
value to return a UTF8 string.
"""
def dont_translate_empty_string(message):
"""
Return the empty string when passed a falsey message
"""
if message:
message = function(message)
else:
message = message_default
return message
return dont_translate_empty_string
gettext = decorate(translation.gettext, '')
ugettext = decorate(translation.ugettext)
monkey_patch.patch(translation, 'gettext', gettext)
monkey_patch.patch(translation, 'ugettext', ugettext)
return is_patched()
def unpatch():
"""
Un-monkey-patch the translation functions
"""
was_patched = False
for name in ATTRIBUTES:
# was_patched must be the second half of the or-clause, to avoid
# short-circuiting the expression
was_patched = monkey_patch.unpatch(translation, name) or was_patched
return was_patched
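# A standalone sketch of the behaviour patch() installs (illustrative only;
# no Django required -- fake_gettext stands in for a real translation function
# that returns catalog meta-data when handed the empty string):
def _empty_string_sketch():
    def fake_gettext(message):
        return 'PO-FILE HEADER META-DATA' if message == '' else message
    def dont_translate_empty_string(message):
        return fake_gettext(message) if message else u''
    assert fake_gettext('') == 'PO-FILE HEADER META-DATA'   # stock gettext
    assert dont_translate_empty_string('') == u''           # patched behaviour
    assert dont_translate_empty_string('hello') == 'hello'  # passthrough intact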
| agpl-3.0 |
rmcgibbo/mdtraj | tests/test_xtc.py | 5 | 12672 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import sys
import numpy as np
from mdtraj import io
from mdtraj.formats import XTCTrajectoryFile
from mdtraj.testing import eq
import pytest
@pytest.fixture()
def fn_xtc(get_fn):
return get_fn('frame0.xtc')
@pytest.fixture()
def pdb(get_fn):
return get_fn('native.pdb')
strides = (1, 2, 3, 4, 5, 7, 10, 11)
def test_read_chunk1(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, 'r', chunk_size_multiplier=0.5) as f:
xyz, time, step, box = f.read()
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_stride(get_fn, fn_xtc):
# read xtc with stride
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_n_frames(get_fn, fn_xtc):
# read xtc with stride with n_frames
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(n_frames=1000, stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_offsets(get_fn, fn_xtc):
# read xtc with stride and offsets
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
xyz, time, step, box = f.read(stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_n_frames_offsets(get_fn, fn_xtc):
# read xtc with stride with n_frames and offsets
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
xyz, time, step, box = f.read(n_frames=1000, stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_switching_offsets(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
# read the first 10 frames with stride of 2
s = 2
n_frames = 10
xyz, time, step, box = f.read(n_frames=n_frames, stride=s)
assert eq(xyz, iofile['xyz'][:n_frames*s:s])
assert eq(step, iofile['step'][:n_frames*s:s])
assert eq(box, iofile['box'][:n_frames*s:s])
assert eq(time, iofile['time'][:n_frames*s:s])
# now read the rest with stride 3; reading should resume from frame index 20,
# e.g. np.arange(0, n_frames * s + 1, 2)[-1] == 20
offset = f.tell()
assert offset == 20
s = 3
xyz, time, step, box = f.read(n_frames=None, stride=s)
assert eq(xyz, iofile['xyz'][offset::s])
assert eq(step, iofile['step'][offset::s])
assert eq(box, iofile['box'][offset::s])
assert eq(time, iofile['time'][offset::s])
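# Illustrative helper (not part of the test suite): reading n_frames at stride
# s leaves the file cursor one stride past the last frame returned, which is
# the arithmetic the tell()/offset assertions above rely on.
def _cursor_after_strided_read(start, n_frames, stride):
    last_frame = start + (n_frames - 1) * stride
    return last_frame + stride
assert _cursor_after_strided_read(0, 10, 2) == 20  # matches f.tell() above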
def test_read_atomindices_1(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=[0, 1, 2])
assert eq(xyz, iofile['xyz'][:, [0, 1, 2]])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_atomindices_w_stride(get_fn, fn_xtc):
# test case for bug: https://github.com/mdtraj/mdtraj/issues/1394
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for stride in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=[0, 1, 2], stride=stride)
assert eq(xyz, iofile['xyz'][:, [0, 1, 2]][::stride])
assert eq(step, iofile['step'][::stride])
assert eq(box, iofile['box'][::stride])
assert eq(time, iofile['time'][::stride])
def test_read_atomindices_2(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=slice(None, None, 2))
assert eq(xyz, iofile['xyz'][:, ::2])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_chunk2(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, 'r', chunk_size_multiplier=1) as f:
xyz, time, step, box = f.read()
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_chunk3(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, chunk_size_multiplier=2) as f:
xyz, time, step, box = f.read(n_frames=100)
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'][:100])
assert eq(step, iofile['step'][:100])
assert eq(box, iofile['box'][:100])
assert eq(time, iofile['time'][:100])
def test_write_0(tmpdir, fn_xtc):
with XTCTrajectoryFile(fn_xtc) as f:
xyz = f.read()[0]
tmpfn = '{}/traj.xtc'.format(tmpdir)
f = XTCTrajectoryFile(tmpfn, 'w')
f.write(xyz)
f.close()
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
def test_write_1(tmpdir):
xyz = np.asarray(np.around(np.random.randn(100, 10, 3), 3), dtype=np.float32)
time = np.asarray(np.random.randn(100), dtype=np.float32)
step = np.arange(100)
box = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(xyz, time=time, step=step, box=box)
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
eq(time, time2)
eq(step, step2)
eq(box, box2)
def test_write_2(tmpdir):
xyz = np.asarray(np.around(np.random.randn(100, 10, 3), 3), dtype=np.float32)
time = np.asarray(np.random.randn(100), dtype=np.float32)
step = np.arange(100)
box = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
for i in range(len(xyz)):
f.write(xyz[i], time=time[i], step=step[i], box=box[i])
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
eq(time, time2)
eq(step, step2)
eq(box, box2)
def test_read_error_0(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
with pytest.raises(IOError):
with XTCTrajectoryFile(tmpfn, 'r') as f:
f.read()
def test_write_error_0(tmpdir):
xyz = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
with pytest.raises(ValueError):
f.read(xyz)
def test_read_error_1():
with pytest.raises(IOError):
XTCTrajectoryFile('/tmp/sdfsdfsdf')
def test_read_error_2(get_fn):
with pytest.raises(IOError):
XTCTrajectoryFile(get_fn('frame0.dcd')).read()
def test_xtc_write_weird_0(tmpdir):
x0 = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
x1 = np.asarray(np.random.randn(100, 9, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(x0)
with pytest.raises(ValueError):
f.write(x1)
xr = XTCTrajectoryFile(tmpfn).read()[0]
print(xr.shape)
def test_tell(get_fn):
with XTCTrajectoryFile(get_fn('frame0.xtc')) as f:
eq(f.tell(), 0)
f.read(101)
eq(f.tell(), 101)
f.read(3)
eq(f.tell(), 104)
def test_seek(get_fn):
reference = XTCTrajectoryFile(get_fn('frame0.xtc')).read()[0]
with XTCTrajectoryFile(get_fn('frame0.xtc')) as f:
eq(f.tell(), 0)
eq(f.read(1)[0][0], reference[0])
eq(f.tell(), 1)
xyz = f.read(1)[0][0]
eq(xyz, reference[1])
eq(f.tell(), 2)
f.seek(0)
eq(f.tell(), 0)
xyz = f.read(1)[0][0]
eq(f.tell(), 1)
eq(xyz, reference[0])
f.seek(5) # offset array is going to be built
assert len(f.offsets) == len(reference)
eq(f.read(1)[0][0], reference[5])
eq(f.tell(), 6)
f.seek(-5, 1)
eq(f.tell(), 1)
eq(f.read(1)[0][0], reference[1])
def test_seek_natoms9(tmpdir, get_fn):
# create a xtc file with 9 atoms and seek it.
with XTCTrajectoryFile(get_fn('frame0.xtc'), 'r') as fh:
xyz = fh.read()[0][:, :9, :]
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz)
with XTCTrajectoryFile(tmpfn, 'r') as f:
eq(f.read(1)[0].shape, (1, 9, 3))
eq(f.tell(), 1)
f.seek(99)
eq(f.read(1)[0].squeeze(), xyz[99])
# seek relative
f.seek(-1, 1)
eq(f.read(1)[0].squeeze(), xyz[99])
f.seek(0, 0)
eq(f.read(1)[0].squeeze(), xyz[0])
def test_seek_out_of_bounds(get_fn):
with XTCTrajectoryFile(get_fn('frame0.xtc'), 'r') as fh:
with pytest.raises(IOError):
fh.seek(10000000)
def test_ragged_1(tmpdir):
# try first writing no box vectors, and then adding some
xyz = np.random.randn(100, 5, 3)
time = np.random.randn(100)
box = np.random.randn(100, 3, 3)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz)
with pytest.raises(ValueError):
f.write(xyz, time, box)
def test_ragged_2(tmpdir):
# try first writing box vectors, and then omitting them
xyz = np.random.randn(100, 5, 3)
time = np.random.randn(100)
box = np.random.randn(100, 3, 3)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz, time=time, box=box)
with pytest.raises(ValueError):
f.write(xyz)
def test_short_traj(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(np.random.uniform(size=(5, 100000, 3)))
with XTCTrajectoryFile(tmpfn, 'r') as f:
assert len(f) == 5, len(f)
not_on_win = pytest.mark.skipif(sys.platform.startswith('win'),
reason='Cannot reopen a file that is being written, due to file locking.')
@not_on_win
def test_flush(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
data = np.random.random((5, 100, 3))
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(data)
f.flush()
# note that f is still open, so we can now try to read the contents flushed to disk.
with XTCTrajectoryFile(tmpfn, 'r') as f2:
out = f2.read()
np.testing.assert_allclose(out[0], data, atol=1E-3)
| lgpl-2.1 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/mac_arabic.py | 593 | 36723 | """ Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-arabic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
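# A minimal usage sketch (illustrative; in a stock CPython 2 this codec is
# already registered, so the lookup below resolves to this module):
#
#     assert '\xc7\xe4'.decode('mac-arabic') == u'\u0627\u0644'
#
# The assert is left commented out because encodings modules are imported
# during codec lookup itself, and decoding at import time could recurse.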
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00a0, # NO-BREAK SPACE, right-left
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA
0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f7, # DIVISION SIGN, right-left
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x0020, # SPACE, right-left
0x00a1: 0x0021, # EXCLAMATION MARK, right-left
0x00a2: 0x0022, # QUOTATION MARK, right-left
0x00a3: 0x0023, # NUMBER SIGN, right-left
0x00a4: 0x0024, # DOLLAR SIGN, right-left
0x00a5: 0x066a, # ARABIC PERCENT SIGN
0x00a6: 0x0026, # AMPERSAND, right-left
0x00a7: 0x0027, # APOSTROPHE, right-left
0x00a8: 0x0028, # LEFT PARENTHESIS, right-left
0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left
0x00aa: 0x002a, # ASTERISK, right-left
0x00ab: 0x002b, # PLUS SIGN, right-left
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0x002d, # HYPHEN-MINUS, right-left
0x00ae: 0x002e, # FULL STOP, right-left
0x00af: 0x002f, # SOLIDUS, right-left
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x00ba: 0x003a, # COLON, right-left
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0x003c, # LESS-THAN SIGN, right-left
0x00bd: 0x003d, # EQUALS SIGN, right-left
0x00be: 0x003e, # GREATER-THAN SIGN, right-left
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d7: 0x0637, # ARABIC LETTER TAH
0x00d8: 0x0638, # ARABIC LETTER ZAH
0x00d9: 0x0639, # ARABIC LETTER AIN
0x00da: 0x063a, # ARABIC LETTER GHAIN
0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left
0x00dc: 0x005c, # REVERSE SOLIDUS, right-left
0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left
0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left
0x00df: 0x005f, # LOW LINE, right-left
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0x0641, # ARABIC LETTER FEH
0x00e2: 0x0642, # ARABIC LETTER QAF
0x00e3: 0x0643, # ARABIC LETTER KAF
0x00e4: 0x0644, # ARABIC LETTER LAM
0x00e5: 0x0645, # ARABIC LETTER MEEM
0x00e6: 0x0646, # ARABIC LETTER NOON
0x00e7: 0x0647, # ARABIC LETTER HEH
0x00e8: 0x0648, # ARABIC LETTER WAW
0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ea: 0x064a, # ARABIC LETTER YEH
0x00eb: 0x064b, # ARABIC FATHATAN
0x00ec: 0x064c, # ARABIC DAMMATAN
0x00ed: 0x064d, # ARABIC KASRATAN
0x00ee: 0x064e, # ARABIC FATHA
0x00ef: 0x064f, # ARABIC DAMMA
0x00f0: 0x0650, # ARABIC KASRA
0x00f1: 0x0651, # ARABIC SHADDA
0x00f2: 0x0652, # ARABIC SUKUN
0x00f3: 0x067e, # ARABIC LETTER PEH
0x00f4: 0x0679, # ARABIC LETTER TTEH
0x00f5: 0x0686, # ARABIC LETTER TCHEH
0x00f6: 0x06d5, # ARABIC LETTER AE
0x00f7: 0x06a4, # ARABIC LETTER VEH
0x00f8: 0x06af, # ARABIC LETTER GAF
0x00f9: 0x0688, # ARABIC LETTER DDAL
0x00fa: 0x0691, # ARABIC LETTER RREH
0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left
0x00fc: 0x007c, # VERTICAL LINE, right-left
0x00fd: 0x007d, # RIGHT CURLY BRACKET, right-left
0x00fe: 0x0698, # ARABIC LETTER JEH
0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> CONTROL CHARACTER
u'\x01' # 0x0001 -> CONTROL CHARACTER
u'\x02' # 0x0002 -> CONTROL CHARACTER
u'\x03' # 0x0003 -> CONTROL CHARACTER
u'\x04' # 0x0004 -> CONTROL CHARACTER
u'\x05' # 0x0005 -> CONTROL CHARACTER
u'\x06' # 0x0006 -> CONTROL CHARACTER
u'\x07' # 0x0007 -> CONTROL CHARACTER
u'\x08' # 0x0008 -> CONTROL CHARACTER
u'\t' # 0x0009 -> CONTROL CHARACTER
u'\n' # 0x000a -> CONTROL CHARACTER
u'\x0b' # 0x000b -> CONTROL CHARACTER
u'\x0c' # 0x000c -> CONTROL CHARACTER
u'\r' # 0x000d -> CONTROL CHARACTER
u'\x0e' # 0x000e -> CONTROL CHARACTER
u'\x0f' # 0x000f -> CONTROL CHARACTER
u'\x10' # 0x0010 -> CONTROL CHARACTER
u'\x11' # 0x0011 -> CONTROL CHARACTER
u'\x12' # 0x0012 -> CONTROL CHARACTER
u'\x13' # 0x0013 -> CONTROL CHARACTER
u'\x14' # 0x0014 -> CONTROL CHARACTER
u'\x15' # 0x0015 -> CONTROL CHARACTER
u'\x16' # 0x0016 -> CONTROL CHARACTER
u'\x17' # 0x0017 -> CONTROL CHARACTER
u'\x18' # 0x0018 -> CONTROL CHARACTER
u'\x19' # 0x0019 -> CONTROL CHARACTER
u'\x1a' # 0x001a -> CONTROL CHARACTER
u'\x1b' # 0x001b -> CONTROL CHARACTER
u'\x1c' # 0x001c -> CONTROL CHARACTER
u'\x1d' # 0x001d -> CONTROL CHARACTER
u'\x1e' # 0x001e -> CONTROL CHARACTER
u'\x1f' # 0x001f -> CONTROL CHARACTER
u' ' # 0x0020 -> SPACE, left-right
u'!' # 0x0021 -> EXCLAMATION MARK, left-right
u'"' # 0x0022 -> QUOTATION MARK, left-right
u'#' # 0x0023 -> NUMBER SIGN, left-right
u'$' # 0x0024 -> DOLLAR SIGN, left-right
u'%' # 0x0025 -> PERCENT SIGN, left-right
u'&' # 0x0026 -> AMPERSAND, left-right
u"'" # 0x0027 -> APOSTROPHE, left-right
u'(' # 0x0028 -> LEFT PARENTHESIS, left-right
u')' # 0x0029 -> RIGHT PARENTHESIS, left-right
u'*' # 0x002a -> ASTERISK, left-right
u'+' # 0x002b -> PLUS SIGN, left-right
u',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
u'-' # 0x002d -> HYPHEN-MINUS, left-right
u'.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
u'/' # 0x002f -> SOLIDUS, left-right
u'0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
u':' # 0x003a -> COLON, left-right
u';' # 0x003b -> SEMICOLON, left-right
u'<' # 0x003c -> LESS-THAN SIGN, left-right
u'=' # 0x003d -> EQUALS SIGN, left-right
u'>' # 0x003e -> GREATER-THAN SIGN, left-right
u'?' # 0x003f -> QUESTION MARK, left-right
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET, left-right
u'\\' # 0x005c -> REVERSE SOLIDUS, left-right
u']' # 0x005d -> RIGHT SQUARE BRACKET, left-right
u'^' # 0x005e -> CIRCUMFLEX ACCENT, left-right
u'_' # 0x005f -> LOW LINE, left-right
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET, left-right
u'|' # 0x007c -> VERTICAL LINE, left-right
u'}' # 0x007d -> RIGHT CURLY BRACKET, left-right
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> CONTROL CHARACTER
u'\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xa0' # 0x0081 -> NO-BREAK SPACE, right-left
u'\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA
u'\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x0090 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE
u'\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left
u'\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE
u'\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0x009b -> DIVISION SIGN, right-left
u'\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS
u' ' # 0x00a0 -> SPACE, right-left
u'!' # 0x00a1 -> EXCLAMATION MARK, right-left
u'"' # 0x00a2 -> QUOTATION MARK, right-left
u'#' # 0x00a3 -> NUMBER SIGN, right-left
u'$' # 0x00a4 -> DOLLAR SIGN, right-left
u'\u066a' # 0x00a5 -> ARABIC PERCENT SIGN
u'&' # 0x00a6 -> AMPERSAND, right-left
u"'" # 0x00a7 -> APOSTROPHE, right-left
u'(' # 0x00a8 -> LEFT PARENTHESIS, right-left
u')' # 0x00a9 -> RIGHT PARENTHESIS, right-left
u'*' # 0x00aa -> ASTERISK, right-left
u'+' # 0x00ab -> PLUS SIGN, right-left
u'\u060c' # 0x00ac -> ARABIC COMMA
u'-' # 0x00ad -> HYPHEN-MINUS, right-left
u'.' # 0x00ae -> FULL STOP, right-left
u'/' # 0x00af -> SOLIDUS, right-left
u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override)
u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override)
u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override)
u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override)
u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override)
u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override)
u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override)
u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override)
u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override)
u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override)
u':' # 0x00ba -> COLON, right-left
u'\u061b' # 0x00bb -> ARABIC SEMICOLON
u'<' # 0x00bc -> LESS-THAN SIGN, right-left
u'=' # 0x00bd -> EQUALS SIGN, right-left
u'>' # 0x00be -> GREATER-THAN SIGN, right-left
u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
u'\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
u'\u0621' # 0x00c1 -> ARABIC LETTER HAMZA
u'\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0x00c7 -> ARABIC LETTER ALEF
u'\u0628' # 0x00c8 -> ARABIC LETTER BEH
u'\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0x00ca -> ARABIC LETTER TEH
u'\u062b' # 0x00cb -> ARABIC LETTER THEH
u'\u062c' # 0x00cc -> ARABIC LETTER JEEM
u'\u062d' # 0x00cd -> ARABIC LETTER HAH
u'\u062e' # 0x00ce -> ARABIC LETTER KHAH
u'\u062f' # 0x00cf -> ARABIC LETTER DAL
u'\u0630' # 0x00d0 -> ARABIC LETTER THAL
u'\u0631' # 0x00d1 -> ARABIC LETTER REH
u'\u0632' # 0x00d2 -> ARABIC LETTER ZAIN
u'\u0633' # 0x00d3 -> ARABIC LETTER SEEN
u'\u0634' # 0x00d4 -> ARABIC LETTER SHEEN
u'\u0635' # 0x00d5 -> ARABIC LETTER SAD
u'\u0636' # 0x00d6 -> ARABIC LETTER DAD
u'\u0637' # 0x00d7 -> ARABIC LETTER TAH
u'\u0638' # 0x00d8 -> ARABIC LETTER ZAH
u'\u0639' # 0x00d9 -> ARABIC LETTER AIN
u'\u063a' # 0x00da -> ARABIC LETTER GHAIN
u'[' # 0x00db -> LEFT SQUARE BRACKET, right-left
u'\\' # 0x00dc -> REVERSE SOLIDUS, right-left
u']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left
u'^' # 0x00de -> CIRCUMFLEX ACCENT, right-left
u'_' # 0x00df -> LOW LINE, right-left
u'\u0640' # 0x00e0 -> ARABIC TATWEEL
u'\u0641' # 0x00e1 -> ARABIC LETTER FEH
u'\u0642' # 0x00e2 -> ARABIC LETTER QAF
u'\u0643' # 0x00e3 -> ARABIC LETTER KAF
u'\u0644' # 0x00e4 -> ARABIC LETTER LAM
u'\u0645' # 0x00e5 -> ARABIC LETTER MEEM
u'\u0646' # 0x00e6 -> ARABIC LETTER NOON
u'\u0647' # 0x00e7 -> ARABIC LETTER HEH
u'\u0648' # 0x00e8 -> ARABIC LETTER WAW
u'\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0x00ea -> ARABIC LETTER YEH
u'\u064b' # 0x00eb -> ARABIC FATHATAN
u'\u064c' # 0x00ec -> ARABIC DAMMATAN
u'\u064d' # 0x00ed -> ARABIC KASRATAN
u'\u064e' # 0x00ee -> ARABIC FATHA
u'\u064f' # 0x00ef -> ARABIC DAMMA
u'\u0650' # 0x00f0 -> ARABIC KASRA
u'\u0651' # 0x00f1 -> ARABIC SHADDA
u'\u0652' # 0x00f2 -> ARABIC SUKUN
u'\u067e' # 0x00f3 -> ARABIC LETTER PEH
u'\u0679' # 0x00f4 -> ARABIC LETTER TTEH
u'\u0686' # 0x00f5 -> ARABIC LETTER TCHEH
u'\u06d5' # 0x00f6 -> ARABIC LETTER AE
u'\u06a4' # 0x00f7 -> ARABIC LETTER VEH
u'\u06af' # 0x00f8 -> ARABIC LETTER GAF
u'\u0688' # 0x00f9 -> ARABIC LETTER DDAL
u'\u0691' # 0x00fa -> ARABIC LETTER RREH
u'{' # 0x00fb -> LEFT CURLY BRACKET, right-left
u'|' # 0x00fc -> VERTICAL LINE, right-left
u'}' # 0x00fd -> RIGHT CURLY BRACKET, right-left
u'\u0698' # 0x00fe -> ARABIC LETTER JEH
u'\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # CONTROL CHARACTER
0x0001: 0x0001, # CONTROL CHARACTER
0x0002: 0x0002, # CONTROL CHARACTER
0x0003: 0x0003, # CONTROL CHARACTER
0x0004: 0x0004, # CONTROL CHARACTER
0x0005: 0x0005, # CONTROL CHARACTER
0x0006: 0x0006, # CONTROL CHARACTER
0x0007: 0x0007, # CONTROL CHARACTER
0x0008: 0x0008, # CONTROL CHARACTER
0x0009: 0x0009, # CONTROL CHARACTER
0x000a: 0x000a, # CONTROL CHARACTER
0x000b: 0x000b, # CONTROL CHARACTER
0x000c: 0x000c, # CONTROL CHARACTER
0x000d: 0x000d, # CONTROL CHARACTER
0x000e: 0x000e, # CONTROL CHARACTER
0x000f: 0x000f, # CONTROL CHARACTER
0x0010: 0x0010, # CONTROL CHARACTER
0x0011: 0x0011, # CONTROL CHARACTER
0x0012: 0x0012, # CONTROL CHARACTER
0x0013: 0x0013, # CONTROL CHARACTER
0x0014: 0x0014, # CONTROL CHARACTER
0x0015: 0x0015, # CONTROL CHARACTER
0x0016: 0x0016, # CONTROL CHARACTER
0x0017: 0x0017, # CONTROL CHARACTER
0x0018: 0x0018, # CONTROL CHARACTER
0x0019: 0x0019, # CONTROL CHARACTER
0x001a: 0x001a, # CONTROL CHARACTER
0x001b: 0x001b, # CONTROL CHARACTER
0x001c: 0x001c, # CONTROL CHARACTER
0x001d: 0x001d, # CONTROL CHARACTER
0x001e: 0x001e, # CONTROL CHARACTER
0x001f: 0x001f, # CONTROL CHARACTER
0x0020: 0x0020, # SPACE, left-right
0x0020: 0x00a0, # SPACE, right-left
0x0021: 0x0021, # EXCLAMATION MARK, left-right
0x0021: 0x00a1, # EXCLAMATION MARK, right-left
0x0022: 0x0022, # QUOTATION MARK, left-right
0x0022: 0x00a2, # QUOTATION MARK, right-left
0x0023: 0x0023, # NUMBER SIGN, left-right
0x0023: 0x00a3, # NUMBER SIGN, right-left
0x0024: 0x0024, # DOLLAR SIGN, left-right
0x0024: 0x00a4, # DOLLAR SIGN, right-left
0x0025: 0x0025, # PERCENT SIGN, left-right
0x0026: 0x0026, # AMPERSAND, left-right
0x0026: 0x00a6, # AMPERSAND, right-left
0x0027: 0x0027, # APOSTROPHE, left-right
0x0027: 0x00a7, # APOSTROPHE, right-left
0x0028: 0x0028, # LEFT PARENTHESIS, left-right
0x0028: 0x00a8, # LEFT PARENTHESIS, right-left
0x0029: 0x0029, # RIGHT PARENTHESIS, left-right
0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left
0x002a: 0x002a, # ASTERISK, left-right
0x002a: 0x00aa, # ASTERISK, right-left
0x002b: 0x002b, # PLUS SIGN, left-right
0x002b: 0x00ab, # PLUS SIGN, right-left
0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
0x002d: 0x002d, # HYPHEN-MINUS, left-right
0x002d: 0x00ad, # HYPHEN-MINUS, right-left
0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
0x002e: 0x00ae, # FULL STOP, right-left
0x002f: 0x002f, # SOLIDUS, left-right
0x002f: 0x00af, # SOLIDUS, right-left
0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
0x003a: 0x003a, # COLON, left-right
0x003a: 0x00ba, # COLON, right-left
0x003b: 0x003b, # SEMICOLON, left-right
0x003c: 0x003c, # LESS-THAN SIGN, left-right
0x003c: 0x00bc, # LESS-THAN SIGN, right-left
0x003d: 0x003d, # EQUALS SIGN, left-right
0x003d: 0x00bd, # EQUALS SIGN, right-left
0x003e: 0x003e, # GREATER-THAN SIGN, left-right
0x003e: 0x00be, # GREATER-THAN SIGN, right-left
0x003f: 0x003f, # QUESTION MARK, left-right
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right
0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left
0x005c: 0x005c, # REVERSE SOLIDUS, left-right
0x005c: 0x00dc, # REVERSE SOLIDUS, right-left
0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right
0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left
0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right
0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left
0x005f: 0x005f, # LOW LINE, left-right
0x005f: 0x00df, # LOW LINE, right-left
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET, left-right
0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left
0x007c: 0x007c, # VERTICAL LINE, left-right
0x007c: 0x00fc, # VERTICAL LINE, right-left
0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right
0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # CONTROL CHARACTER
0x00a0: 0x0081, # NO-BREAK SPACE, right-left
0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x009b, # DIVISION SIGN, right-left
0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x009e, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0621: 0x00c1, # ARABIC LETTER HAMZA
0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x0627: 0x00c7, # ARABIC LETTER ALEF
0x0628: 0x00c8, # ARABIC LETTER BEH
0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA
0x062a: 0x00ca, # ARABIC LETTER TEH
0x062b: 0x00cb, # ARABIC LETTER THEH
0x062c: 0x00cc, # ARABIC LETTER JEEM
0x062d: 0x00cd, # ARABIC LETTER HAH
0x062e: 0x00ce, # ARABIC LETTER KHAH
0x062f: 0x00cf, # ARABIC LETTER DAL
0x0630: 0x00d0, # ARABIC LETTER THAL
0x0631: 0x00d1, # ARABIC LETTER REH
0x0632: 0x00d2, # ARABIC LETTER ZAIN
0x0633: 0x00d3, # ARABIC LETTER SEEN
0x0634: 0x00d4, # ARABIC LETTER SHEEN
0x0635: 0x00d5, # ARABIC LETTER SAD
0x0636: 0x00d6, # ARABIC LETTER DAD
0x0637: 0x00d7, # ARABIC LETTER TAH
0x0638: 0x00d8, # ARABIC LETTER ZAH
0x0639: 0x00d9, # ARABIC LETTER AIN
0x063a: 0x00da, # ARABIC LETTER GHAIN
0x0640: 0x00e0, # ARABIC TATWEEL
0x0641: 0x00e1, # ARABIC LETTER FEH
0x0642: 0x00e2, # ARABIC LETTER QAF
0x0643: 0x00e3, # ARABIC LETTER KAF
0x0644: 0x00e4, # ARABIC LETTER LAM
0x0645: 0x00e5, # ARABIC LETTER MEEM
0x0646: 0x00e6, # ARABIC LETTER NOON
0x0647: 0x00e7, # ARABIC LETTER HEH
0x0648: 0x00e8, # ARABIC LETTER WAW
0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA
0x064a: 0x00ea, # ARABIC LETTER YEH
0x064b: 0x00eb, # ARABIC FATHATAN
0x064c: 0x00ec, # ARABIC DAMMATAN
0x064d: 0x00ed, # ARABIC KASRATAN
0x064e: 0x00ee, # ARABIC FATHA
0x064f: 0x00ef, # ARABIC DAMMA
0x0650: 0x00f0, # ARABIC KASRA
0x0651: 0x00f1, # ARABIC SHADDA
0x0652: 0x00f2, # ARABIC SUKUN
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x066a: 0x00a5, # ARABIC PERCENT SIGN
0x0679: 0x00f4, # ARABIC LETTER TTEH
0x067e: 0x00f3, # ARABIC LETTER PEH
0x0686: 0x00f5, # ARABIC LETTER TCHEH
0x0688: 0x00f9, # ARABIC LETTER DDAL
0x0691: 0x00fa, # ARABIC LETTER RREH
0x0698: 0x00fe, # ARABIC LETTER JEH
0x06a4: 0x00f7, # ARABIC LETTER VEH
0x06af: 0x00f8, # ARABIC LETTER GAF
0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA
0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE
0x06d5: 0x00f6, # ARABIC LETTER AE
0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left
0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
}
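# Illustrative sanity check (not part of the generated file): every character
# in the decoding table must round-trip through encoding_map to a byte that
# decodes back to the same character. The byte itself may differ, because the
# duplicated literal keys above mean the right-left variant wins whenever a
# character appears in both directions.
if __name__ == '__main__':
    for char in decoding_table:
        byte = encoding_map[ord(char)]
        assert decoding_table[byte] == char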
| apache-2.0 |